1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
2 // Test host code gen
3 // RUN: %clang_cc1 -DLAMBDA -verify -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1
4 // RUN: %clang_cc1 -DLAMBDA -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
5 // RUN: %clang_cc1 -DLAMBDA -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK2
6 // RUN: %clang_cc1 -DLAMBDA -verify -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK3
7 // RUN: %clang_cc1 -DLAMBDA -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
8 // RUN: %clang_cc1 -DLAMBDA -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK4
9 
10 // RUN: %clang_cc1 -DLAMBDA -verify -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK5
11 // RUN: %clang_cc1 -DLAMBDA -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
12 // RUN: %clang_cc1 -DLAMBDA -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK6
13 // RUN: %clang_cc1 -DLAMBDA -verify -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK7
14 // RUN: %clang_cc1 -DLAMBDA -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
15 // RUN: %clang_cc1 -DLAMBDA -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK8
16 
17 // RUN: %clang_cc1  -verify -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK9
18 // RUN: %clang_cc1  -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
19 // RUN: %clang_cc1  -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK10
20 // RUN: %clang_cc1  -verify -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK11
21 // RUN: %clang_cc1  -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
22 // RUN: %clang_cc1  -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK12
23 
24 // RUN: %clang_cc1  -verify -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK13
25 // RUN: %clang_cc1  -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
26 // RUN: %clang_cc1  -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK14
27 // RUN: %clang_cc1  -verify -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK15
28 // RUN: %clang_cc1  -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
29 // RUN: %clang_cc1  -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK16
30 // expected-no-diagnostics
31 #ifndef HEADER
32 #define HEADER
33 
34 
35 template <typename T>
36 T tmain() {
37   T *a, *b, *c;
38   int n = 10000;
39   int ch = 100;
40 
41   // no schedule clauses
42   #pragma omp target
43   #pragma omp teams
44   #pragma omp distribute parallel for simd
45   for (int i = 0; i < n; ++i) {
46     a[i] = b[i] + c[i];
47   }
48 
49   // dist_schedule: static no chunk
50   #pragma omp target
51   #pragma omp teams
52   #pragma omp distribute parallel for simd dist_schedule(static)
53   for (int i = 0; i < n; ++i) {
54     a[i] = b[i] + c[i];
55   }
56 
57   // dist_schedule: static chunk
58   #pragma omp target
59   #pragma omp teams
60   #pragma omp distribute parallel for simd dist_schedule(static, ch)
61   for (int i = 0; i < n; ++i) {
62     a[i] = b[i] + c[i];
63   }
64 
65   // schedule: static no chunk
66   #pragma omp target
67   #pragma omp teams
68   #pragma omp distribute parallel for simd schedule(static)
69   for (int i = 0; i < n; ++i) {
70     a[i] = b[i] + c[i];
71   }
72 
73   // schedule: static chunk
74   #pragma omp target
75   #pragma omp teams
76   #pragma omp distribute parallel for simd schedule(static, ch)
77   for (int i = 0; i < n; ++i) {
78     a[i] = b[i] + c[i];
79   }
80 
81   // schedule: dynamic no chunk
82   #pragma omp target
83   #pragma omp teams
84   #pragma omp distribute parallel for simd schedule(dynamic)
85   for (int i = 0; i < n; ++i) {
86     a[i] = b[i] + c[i];
87   }
88 
89   // schedule: dynamic chunk
90   #pragma omp target
91   #pragma omp teams
92   #pragma omp distribute parallel for simd schedule(dynamic, ch)
93   for (int i = 0; i < n; ++i) {
94     a[i] = b[i] + c[i];
95   }
96 
97   return T();
98 }
99 
100 int main() {
101   double *a, *b, *c;
102   int n = 10000;
103   int ch = 100;
104 
105 #ifdef LAMBDA
106   [&]() {
107 
108 
109 
110 
111 
112 
113 
114 
115     // no schedule clauses
116     #pragma omp target
117     #pragma omp teams
118 
119     #pragma omp distribute parallel for simd
120     for (int i = 0; i < n; ++i) {
121       a[i] = b[i] + c[i];
122 
123 
124       // check EUB for distribute
125 
126       // initialize omp.iv
127 
128       // check exit condition
129 
130       // check that PrevLB and PrevUB are passed to the 'for'
131       // check that distlb and distub are properly passed to fork_call
132 
133       // increment by stride (distInc - 'parallel for' executes the whole chunk) and latch
134 
135 
136       // implementation of 'parallel for'
137 
138 
139       // initialize lb and ub to PrevLB and PrevUB
140 
141       // PrevEUB is only used when 'for' has a chunked schedule, otherwise EUB is used
142       // In this case we use EUB
143 
144       // initialize omp.iv
145 
146       // check exit condition
147 
148       // check that PrevLB and PrevUB are passed to the 'for'
149 
150       // check stride 1 for 'for' in 'distribute parallel for simd'
151 
152 
153       [&]() {
154         a[i] = b[i] + c[i];
155       }();
156     }
157 
158     // dist_schedule: static no chunk (same as the default - no dist_schedule)
159     #pragma omp target
160     #pragma omp teams
161 
162     #pragma omp distribute parallel for simd dist_schedule(static)
163     for (int i = 0; i < n; ++i) {
164       a[i] = b[i] + c[i];
165 
166 
167       // check EUB for distribute
168 
169       // initialize omp.iv
170 
171       // check exit condition
172 
173       // check that PrevLB and PrevUB are passed to the 'for'
174       // check that distlb and distub are properly passed to fork_call
175 
176       // increment by stride (distInc - 'parallel for' executes the whole chunk) and latch
177 
178 
179       // implementation of 'parallel for'
180 
181 
182       // initialize lb and ub to PrevLB and PrevUB
183 
184       // PrevEUB is only used when 'for' has a chunked schedule, otherwise EUB is used
185       // In this case we use EUB
186 
187       // initialize omp.iv
188 
189       // check exit condition
190 
191       // check that PrevLB and PrevUB are passed to the 'for'
192 
193       // check stride 1 for 'for' in 'distribute parallel for simd'
194 
195       [&]() {
196         a[i] = b[i] + c[i];
197       }();
198     }
199 
200     // dist_schedule: static chunk
201     #pragma omp target
202     #pragma omp teams
203 
204     #pragma omp distribute parallel for simd dist_schedule(static, ch)
205     for (int i = 0; i < n; ++i) {
206       a[i] = b[i] + c[i];
207 
208 
209       // check EUB for distribute
210 
211       // initialize omp.iv
212 
213       // check exit condition
214 
215       // check that PrevLB and PrevUB are passed to the 'for'
216       // check that distlb and distub are properly passed to fork_call
217 
218       // check DistInc
219 
220       // Update UB
221 
222       // Store LB in IV
223 
224 
225       // loop exit
226 
227       // skip implementation of 'parallel for': it uses the default scheduling and was tested above
228       [&]() {
229         a[i] = b[i] + c[i];
230       }();
231     }
232 
233     // schedule: static no chunk
234     #pragma omp target
235     #pragma omp teams
236 
237     #pragma omp distribute parallel for simd schedule(static)
238     for (int i = 0; i < n; ++i) {
239       a[i] = b[i] + c[i];
240 
241       // skip rest of implementation of 'distribute' as it is tested above for the default dist_schedule case
242 
243       // 'parallel for' implementation is the same as the case without a schedule clause (static with no chunk is the default)
244 
245 
246       // initialize lb and ub to PrevLB and PrevUB
247 
248       // PrevEUB is only used when 'for' has a chunked schedule, otherwise EUB is used
249       // In this case we use EUB
250 
251       // initialize omp.iv
252 
253       // check exit condition
254 
255       // check that PrevLB and PrevUB are passed to the 'for'
256 
257       // check stride 1 for 'for' in 'distribute parallel for simd'
258 
259 
260       [&]() {
261         a[i] = b[i] + c[i];
262       }();
263     }
264 
265     // schedule: static chunk
266     #pragma omp target
267     #pragma omp teams
268 
269     #pragma omp distribute parallel for simd schedule(static, ch)
270     for (int i = 0; i < n; ++i) {
271       a[i] = b[i] + c[i];
272       // skip rest of implementation of 'distribute' as it is tested above for the default dist_schedule case
273 
274       // 'parallel for' implementation using outer and inner loops and PrevEUB
275 
276       // initialize lb and ub to PrevLB and PrevUB
277 
278       // check PrevEUB (using PrevUB instead of NumIt as upper bound)
279 
280       // initialize omp.iv (IV = LB)
281 
282       // outer loop: while (IV < UB) {
283 
284 
285 
286       // skip body branch
287 
288       // IV = IV + 1 and inner loop latch
289 
290       // check NextLB and NextUB
291 
292 
293       [&]() {
294         a[i] = b[i] + c[i];
295       }();
296     }
297 
298     // schedule: dynamic no chunk
299     #pragma omp target
300     #pragma omp teams
301 
302     #pragma omp distribute parallel for simd schedule(dynamic)
303     for (int i = 0; i < n; ++i) {
304       a[i] = b[i] + c[i];
305       // skip rest of implementation of 'distribute' as it is tested above for the default dist_schedule case
306 
307       // 'parallel for' implementation using outer and inner loops and PrevEUB
308 
309       // initialize lb and ub to PrevLB and PrevUB
310 
311 
312       // initialize omp.iv (IV = LB)
313 
314 
315       // skip body branch
316 
317       // IV = IV + 1 and inner loop latch
318 
319       // check NextLB and NextUB
320 
321 
322       [&]() {
323         a[i] = b[i] + c[i];
324       }();
325     }
326 
327     // schedule: dynamic chunk
328     #pragma omp target
329     #pragma omp teams
330 
331     #pragma omp distribute parallel for simd schedule(dynamic, ch)
332     for (int i = 0; i < n; ++i) {
333       a[i] = b[i] + c[i];
334       // skip rest of implementation of 'distribute' as it is tested above for the default dist_schedule case
335 
336       // 'parallel for' implementation using outer and inner loops and PrevEUB
337 
338       // initialize lb and ub to PrevLB and PrevUB
339 
340 
341       // initialize omp.iv (IV = LB)
342 
343 
344       // skip body branch
345 
346       // IV = IV + 1 and inner loop latch
347 
348       // check NextLB and NextUB
349 
350 
351       [&]() {
352         a[i] = b[i] + c[i];
353       }();
354     }
355   }();
356   return 0;
357 #else
358 
359 
360 
361 
362 
363 
364 
365 
366 
367   // no schedule clauses
368   #pragma omp target
369   #pragma omp teams
370 
371   #pragma omp distribute parallel for simd
372   for (int i = 0; i < n; ++i) {
373     a[i] = b[i] + c[i];
374 
375 
376     // check EUB for distribute
377 
378     // initialize omp.iv
379 
380     // check exit condition
381 
382     // check that PrevLB and PrevUB are passed to the 'for'
383     // check that distlb and distub are properly passed to fork_call
384 
385     // increment by stride (distInc - 'parallel for' executes the whole chunk) and latch
386 
387 
388     // implementation of 'parallel for'
389 
390 
391     // initialize lb and ub to PrevLB and PrevUB
392 
393     // PrevEUB is only used when 'for' has a chunked schedule, otherwise EUB is used
394     // In this case we use EUB
395 
396     // initialize omp.iv
397 
398     // check exit condition
399 
400     // check that PrevLB and PrevUB are passed to the 'for'
401 
402     // check stride 1 for 'for' in 'distribute parallel for simd'
403 
404   }
405 
406   // dist_schedule: static no chunk
407   #pragma omp target
408   #pragma omp teams
409 
410   #pragma omp distribute parallel for simd dist_schedule(static)
411   for (int i = 0; i < n; ++i) {
412     a[i] = b[i] + c[i];
413 
414 
415     // check EUB for distribute
416 
417     // initialize omp.iv
418 
419     // check exit condition
420 
421     // check that PrevLB and PrevUB are passed to the 'for'
422     // check that distlb and distub are properly passed to fork_call
423 
424     // increment by stride (distInc - 'parallel for' executes the whole chunk) and latch
425 
426 
427     // implementation of 'parallel for'
428 
429 
430     // initialize lb and ub to PrevLB and PrevUB
431 
432     // PrevEUB is only used when 'for' has a chunked schedule, otherwise EUB is used
433     // In this case we use EUB
434 
435     // initialize omp.iv
436 
437     // check exit condition
438 
439     // check that PrevLB and PrevUB are passed to the 'for'
440 
441     // check stride 1 for 'for' in 'distribute parallel for simd'
442 
443   }
444 
445   // dist_schedule: static chunk
446   #pragma omp target
447   #pragma omp teams
448 
449   #pragma omp distribute parallel for simd dist_schedule(static, ch)
450   for (int i = 0; i < n; ++i) {
451     a[i] = b[i] + c[i];
452 
453     // unlike the previous tests, in this one we have an outer and an inner loop for 'distribute'
454 
455     // check EUB for distribute
456 
457     // initialize omp.iv
458 
459     // check exit condition
460 
461     // check that PrevLB and PrevUB are passed to the 'for'
462     // check that distlb and distub are properly passed to fork_call
463 
464     // check DistInc
465 
466     // Update UB
467 
468     // Store LB in IV
469 
470 
471     // loop exit
472 
473     // skip implementation of 'parallel for': it uses the default scheduling and was tested above
474   }
475 
476   // schedule: static no chunk
477   #pragma omp target
478   #pragma omp teams
479 
480   #pragma omp distribute parallel for simd schedule(static)
481   for (int i = 0; i < n; ++i) {
482     a[i] = b[i] + c[i];
483 
484     // skip rest of implementation of 'distribute' as it is tested above for the default dist_schedule case
485 
486     // 'parallel for' implementation is the same as the case without a schedule clause (static with no chunk is the default)
487 
488 
489     // initialize lb and ub to PrevLB and PrevUB
490 
491     // PrevEUB is only used when 'for' has a chunked schedule, otherwise EUB is used
492     // In this case we use EUB
493 
494     // initialize omp.iv
495 
496     // check exit condition
497 
498     // check that PrevLB and PrevUB are passed to the 'for'
499 
500     // check stride 1 for 'for' in 'distribute parallel for simd'
501 
502   }
503 
504   // schedule: static chunk
505   #pragma omp target
506   #pragma omp teams
507 
508   #pragma omp distribute parallel for simd schedule(static, ch)
509   for (int i = 0; i < n; ++i) {
510     a[i] = b[i] + c[i];
511     // skip rest of implementation of 'distribute' as it is tested above for the default dist_schedule case
512 
513     // 'parallel for' implementation using outer and inner loops and PrevEUB
514 
515     // initialize lb and ub to PrevLB and PrevUB
516 
517     // check PrevEUB (using PrevUB instead of NumIt as upper bound)
518 
519     // initialize omp.iv (IV = LB)
520 
521     // outer loop: while (IV < UB) {
522 
523 
524 
525     // skip body branch
526 
527     // IV = IV + 1 and inner loop latch
528 
529     // check NextLB and NextUB
530 
531 
532   }
533 
534   // schedule: dynamic no chunk
535   #pragma omp target
536   #pragma omp teams
537 
538   #pragma omp distribute parallel for simd schedule(dynamic)
539   for (int i = 0; i < n; ++i) {
540     a[i] = b[i] + c[i];
541     // skip rest of implementation of 'distribute' as it is tested above for the default dist_schedule case
542 
543     // 'parallel for' implementation using outer and inner loops and PrevEUB
544 
545     // initialize lb and ub to PrevLB and PrevUB
546 
547 
548     // initialize omp.iv (IV = LB)
549 
550 
551     // skip body branch
552 
553     // IV = IV + 1 and inner loop latch
554 
555     // check NextLB and NextUB
556 
557 
558   }
559 
560   // schedule: dynamic chunk
561   #pragma omp target
562   #pragma omp teams
563 
564   #pragma omp distribute parallel for simd schedule(dynamic, ch)
565   for (int i = 0; i < n; ++i) {
566     a[i] = b[i] + c[i];
567     // skip rest of implementation of 'distribute' as it is tested above for the default dist_schedule case
568 
569     // 'parallel for' implementation using outer and inner loops and PrevEUB
570 
571     // initialize lb and ub to PrevLB and PrevUB
572 
573 
574     // initialize omp.iv (IV = LB)
575 
576 
577     // skip body branch
578 
579     // IV = IV + 1 and inner loop latch
580 
581     // check NextLB and NextUB
582 
583 
584   }
585 
586   return tmain<int>();
587 #endif
588 }
589 
590 // check code
591 
592 
593 
594 
595 
596 
597 
598 
599 
600 
601 
602 // check EUB for distribute
603 
604 // initialize omp.iv
605 
606 // check exit condition
607 
608 // check that PrevLB and PrevUB are passed to the 'for'
609 // check that distlb and distub are properly passed to fork_call
610 
611 // increment by stride (distInc - 'parallel for' executes the whole chunk) and latch
612 
613 
614 // implementation of 'parallel for'
615 
616 
617 // initialize lb and ub to PrevLB and PrevUB
618 
619 // PrevEUB is only used when 'for' has a chunked schedule, otherwise EUB is used
620 // In this case we use EUB
621 
622 // initialize omp.iv
623 
624 // check exit condition
625 
626 // check that PrevLB and PrevUB are passed to the 'for'
627 
628 // check stride 1 for 'for' in 'distribute parallel for simd'
629 
630 
631 
632 
633 
634 // check EUB for distribute
635 
636 // initialize omp.iv
637 
638 // check exit condition
639 
640 // check that PrevLB and PrevUB are passed to the 'for'
641 // check that distlb and distub are properly passed to fork_call
642 
643 // increment by stride (distInc - 'parallel for' executes the whole chunk) and latch
644 
645 
646 // implementation of 'parallel for'
647 
648 
649 // initialize lb and ub to PrevLB and PrevUB
650 
651 // PrevEUB is only used when 'for' has a chunked schedule, otherwise EUB is used
652 // In this case we use EUB
653 
654 // initialize omp.iv
655 
656 // check exit condition
657 
658 // check that PrevLB and PrevUB are passed to the 'for'
659 
660 // check stride 1 for 'for' in 'distribute parallel for simd'
661 
662 
663 
664 
665 // unlike the previous tests, in this one we have an outer and an inner loop for 'distribute'
666 
667 // check EUB for distribute
668 
669 // initialize omp.iv
670 
671 // check exit condition
672 
673 // check that PrevLB and PrevUB are passed to the 'for'
674 // check that distlb and distub are properly passed to fork_call
675 
676 // check DistInc
677 
678 // Update UB
679 
680 // Store LB in IV
681 
682 
683 // loop exit
684 
685 // skip implementation of 'parallel for': it uses the default scheduling and was tested above
686 
687 
688 
689 // skip rest of implementation of 'distribute' as it is tested above for the default dist_schedule case
690 
691 // 'parallel for' implementation is the same as the case without a schedule clause (static with no chunk is the default)
692 
693 
694 // initialize lb and ub to PrevLB and PrevUB
695 
696 // PrevEUB is only used when 'for' has a chunked schedule, otherwise EUB is used
697 // In this case we use EUB
698 
699 // initialize omp.iv
700 
701 // check exit condition
702 
703 // check that PrevLB and PrevUB are passed to the 'for'
704 
705 // check stride 1 for 'for' in 'distribute parallel for simd'
706 
707 
708 
709 // skip rest of implementation of 'distribute' as it is tested above for the default dist_schedule case
710 
711 // 'parallel for' implementation using outer and inner loops and PrevEUB
712 
713 // initialize lb and ub to PrevLB and PrevUB
714 
715 // check PrevEUB (using PrevUB instead of NumIt as upper bound)
716 
717 // initialize omp.iv (IV = LB)
718 
719 // outer loop: while (IV < UB) {
720 
721 
722 
723 // skip body branch
724 
725 // IV = IV + 1 and inner loop latch
726 
727 // check NextLB and NextUB
728 
729 
730 
731 
732 // skip rest of implementation of 'distribute' as it is tested above for the default dist_schedule case
733 
734 // 'parallel for' implementation using outer and inner loops and PrevEUB
735 
736 // initialize lb and ub to PrevLB and PrevUB
737 
738 
739 // initialize omp.iv (IV = LB)
740 
741 
742 // skip body branch
743 
744 // IV = IV + 1 and inner loop latch
745 
746 // check NextLB and NextUB
747 
748 
749 
750 
751 // skip rest of implementation of 'distribute' as it is tested above for the default dist_schedule case
752 
753 // 'parallel for' implementation using outer and inner loops and PrevEUB
754 
755 // initialize lb and ub to PrevLB and PrevUB
756 
757 
758 // initialize omp.iv (IV = LB)
759 
760 
761 // skip body branch
762 
763 // IV = IV + 1 and inner loop latch
764 
765 // check NextLB and NextUB
766 
767 
768 
769 #endif
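// A rough, informal sketch (added for orientation only; it is not itself checked, and names such as
// CombLB, CombUB, PrevLB, PrevUB, NumIt and Stride are descriptive, not the IR value names) of the
// host-side lowering that the autogenerated CHECK lines below verify for the default-schedule
// 'distribute parallel for simd' case. Schedule kind 92 in __kmpc_for_static_init_4 is the unchunked
// static 'distribute' schedule and 34 the unchunked static 'for' schedule, as seen in the checks:
//
//   target region      ->  __kmpc_fork_teams(..., @.omp_outlined., &n, &a, &b, &c)
//   @.omp_outlined.    ->  per-team 'distribute' loop:
//                            __kmpc_for_static_init_4(..., 92, &IsLast, &CombLB, &CombUB, &Stride, 1, 1);
//                            CombUB = min(CombUB, NumIt - 1);
//                            for (IV = CombLB; IV <= CombUB; IV += Stride)
//                              __kmpc_fork_call(..., @.omp_outlined..1, CombLB, CombUB, &n, &a, &b, &c);
//                            __kmpc_for_static_fini(...);
//   @.omp_outlined..1  ->  'parallel for simd' over the team's chunk [PrevLB, PrevUB]:
//                            LB = PrevLB; UB = min(PrevUB, NumIt - 1);
//                            __kmpc_for_static_init_4(..., 34, &IsLast, &LB, &UB, &Stride, 1, 1);
//                            for (IV = LB; IV <= UB; ++IV) { i = IV; a[i] = b[i] + c[i]; }
//                            __kmpc_for_static_fini(...);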
770 // CHECK1-LABEL: define {{[^@]+}}@main
771 // CHECK1-SAME: () #[[ATTR0:[0-9]+]] {
772 // CHECK1-NEXT:  entry:
773 // CHECK1-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
774 // CHECK1-NEXT:    [[A:%.*]] = alloca double*, align 8
775 // CHECK1-NEXT:    [[B:%.*]] = alloca double*, align 8
776 // CHECK1-NEXT:    [[C:%.*]] = alloca double*, align 8
777 // CHECK1-NEXT:    [[N:%.*]] = alloca i32, align 4
778 // CHECK1-NEXT:    [[CH:%.*]] = alloca i32, align 4
779 // CHECK1-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 8
780 // CHECK1-NEXT:    store i32 0, i32* [[RETVAL]], align 4
781 // CHECK1-NEXT:    store i32 10000, i32* [[N]], align 4
782 // CHECK1-NEXT:    store i32 100, i32* [[CH]], align 4
783 // CHECK1-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 0
784 // CHECK1-NEXT:    store i32* [[N]], i32** [[TMP0]], align 8
785 // CHECK1-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 1
786 // CHECK1-NEXT:    store double** [[A]], double*** [[TMP1]], align 8
787 // CHECK1-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 2
788 // CHECK1-NEXT:    store double** [[B]], double*** [[TMP2]], align 8
789 // CHECK1-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 3
790 // CHECK1-NEXT:    store double** [[C]], double*** [[TMP3]], align 8
791 // CHECK1-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 4
792 // CHECK1-NEXT:    store i32* [[CH]], i32** [[TMP4]], align 8
793 // CHECK1-NEXT:    call void @"_ZZ4mainENK3$_0clEv"(%class.anon* nonnull align 8 dereferenceable(40) [[REF_TMP]])
794 // CHECK1-NEXT:    ret i32 0
795 //
796 //
797 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l116
798 // CHECK1-SAME: (i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2:[0-9]+]] {
799 // CHECK1-NEXT:  entry:
800 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
801 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
802 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
803 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
804 // CHECK1-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
805 // CHECK1-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
806 // CHECK1-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
807 // CHECK1-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
808 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
809 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[CONV]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
810 // CHECK1-NEXT:    ret void
811 //
812 //
813 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
814 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
815 // CHECK1-NEXT:  entry:
816 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
817 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
818 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
819 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
820 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
821 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
822 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
823 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
824 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
825 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
826 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
827 // CHECK1-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
828 // CHECK1-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
829 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
830 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
831 // CHECK1-NEXT:    [[I3:%.*]] = alloca i32, align 4
832 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
833 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
834 // CHECK1-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
835 // CHECK1-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
836 // CHECK1-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
837 // CHECK1-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
838 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
839 // CHECK1-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
840 // CHECK1-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
841 // CHECK1-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
842 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
843 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
844 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
845 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
846 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
847 // CHECK1-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
848 // CHECK1-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
849 // CHECK1-NEXT:    store i32 0, i32* [[I]], align 4
850 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
851 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
852 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
853 // CHECK1:       omp.precond.then:
854 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
855 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
856 // CHECK1-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
857 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
858 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
859 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
860 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
861 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
862 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
863 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
864 // CHECK1-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
865 // CHECK1-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
866 // CHECK1:       cond.true:
867 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
868 // CHECK1-NEXT:    br label [[COND_END:%.*]]
869 // CHECK1:       cond.false:
870 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
871 // CHECK1-NEXT:    br label [[COND_END]]
872 // CHECK1:       cond.end:
873 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
874 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
875 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
876 // CHECK1-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
877 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
878 // CHECK1:       omp.inner.for.cond:
879 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
880 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !10
881 // CHECK1-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
882 // CHECK1-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
883 // CHECK1:       omp.inner.for.body:
884 // CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !10
885 // CHECK1-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
886 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !10
887 // CHECK1-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
888 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !10
889 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
890 // CHECK1:       omp.inner.for.inc:
891 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
892 // CHECK1-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !10
893 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
894 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
895 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]]
896 // CHECK1:       omp.inner.for.end:
897 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
898 // CHECK1:       omp.loop.exit:
899 // CHECK1-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
900 // CHECK1-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
901 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
902 // CHECK1-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
903 // CHECK1-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
904 // CHECK1-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
905 // CHECK1:       .omp.final.then:
906 // CHECK1-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
907 // CHECK1-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
908 // CHECK1-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
909 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
910 // CHECK1-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
911 // CHECK1-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
912 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
913 // CHECK1:       .omp.final.done:
914 // CHECK1-NEXT:    br label [[OMP_PRECOND_END]]
915 // CHECK1:       omp.precond.end:
916 // CHECK1-NEXT:    ret void
917 //
918 //
919 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1
920 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
921 // CHECK1-NEXT:  entry:
922 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
923 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
924 // CHECK1-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
925 // CHECK1-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
926 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
927 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
928 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
929 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
930 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
931 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
932 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
933 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
934 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
935 // CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
936 // CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
937 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
938 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
939 // CHECK1-NEXT:    [[I4:%.*]] = alloca i32, align 4
940 // CHECK1-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8
941 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
942 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
943 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
944 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
945 // CHECK1-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
946 // CHECK1-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
947 // CHECK1-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
948 // CHECK1-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
949 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
950 // CHECK1-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
951 // CHECK1-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
952 // CHECK1-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
953 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
954 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
955 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
956 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
957 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
958 // CHECK1-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
959 // CHECK1-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
960 // CHECK1-NEXT:    store i32 0, i32* [[I]], align 4
961 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
962 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
963 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
964 // CHECK1:       omp.precond.then:
965 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
966 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
967 // CHECK1-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
968 // CHECK1-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
969 // CHECK1-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
970 // CHECK1-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
971 // CHECK1-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
972 // CHECK1-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
973 // CHECK1-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
974 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
975 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
976 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
977 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
978 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
979 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
980 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
981 // CHECK1-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
982 // CHECK1-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
983 // CHECK1:       cond.true:
984 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
985 // CHECK1-NEXT:    br label [[COND_END:%.*]]
986 // CHECK1:       cond.false:
987 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
988 // CHECK1-NEXT:    br label [[COND_END]]
989 // CHECK1:       cond.end:
990 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
991 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
992 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
993 // CHECK1-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
994 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
995 // CHECK1:       omp.inner.for.cond:
996 // CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
997 // CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !14
998 // CHECK1-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
999 // CHECK1-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1000 // CHECK1:       omp.inner.for.body:
1001 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
1002 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
1003 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1004 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !14
1005 // CHECK1-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !14
1006 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !14
1007 // CHECK1-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
1008 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i64 [[IDXPROM]]
1009 // CHECK1-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !14
1010 // CHECK1-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !14
1011 // CHECK1-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !14
1012 // CHECK1-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
1013 // CHECK1-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM7]]
1014 // CHECK1-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX8]], align 8, !llvm.access.group !14
1015 // CHECK1-NEXT:    [[ADD9:%.*]] = fadd double [[TMP22]], [[TMP25]]
1016 // CHECK1-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !14
1017 // CHECK1-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !14
1018 // CHECK1-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
1019 // CHECK1-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM10]]
1020 // CHECK1-NEXT:    store double [[ADD9]], double* [[ARRAYIDX11]], align 8, !llvm.access.group !14
1021 // CHECK1-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0
1022 // CHECK1-NEXT:    store double** [[TMP1]], double*** [[TMP28]], align 8, !llvm.access.group !14
1023 // CHECK1-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 1
1024 // CHECK1-NEXT:    store i32* [[I4]], i32** [[TMP29]], align 8, !llvm.access.group !14
1025 // CHECK1-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 2
1026 // CHECK1-NEXT:    store double** [[TMP2]], double*** [[TMP30]], align 8, !llvm.access.group !14
1027 // CHECK1-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 3
1028 // CHECK1-NEXT:    store double** [[TMP3]], double*** [[TMP31]], align 8, !llvm.access.group !14
1029 // CHECK1-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(%class.anon.0* nonnull align 8 dereferenceable(32) [[REF_TMP]]), !llvm.access.group !14
1030 // CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
1031 // CHECK1:       omp.body.continue:
1032 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1033 // CHECK1:       omp.inner.for.inc:
1034 // CHECK1-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
1035 // CHECK1-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP32]], 1
1036 // CHECK1-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
1037 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]]
1038 // CHECK1:       omp.inner.for.end:
1039 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1040 // CHECK1:       omp.loop.exit:
1041 // CHECK1-NEXT:    [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1042 // CHECK1-NEXT:    [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
1043 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
1044 // CHECK1-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1045 // CHECK1-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
1046 // CHECK1-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1047 // CHECK1:       .omp.final.then:
1048 // CHECK1-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1049 // CHECK1-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP37]], 0
1050 // CHECK1-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
1051 // CHECK1-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
1052 // CHECK1-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
1053 // CHECK1-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
1054 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1055 // CHECK1:       .omp.final.done:
1056 // CHECK1-NEXT:    br label [[OMP_PRECOND_END]]
1057 // CHECK1:       omp.precond.end:
1058 // CHECK1-NEXT:    ret void
1059 //
1060 //
1061 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159
1062 // CHECK1-SAME: (i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2]] {
1063 // CHECK1-NEXT:  entry:
1064 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
1065 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
1066 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
1067 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
1068 // CHECK1-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
1069 // CHECK1-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
1070 // CHECK1-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
1071 // CHECK1-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
1072 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
1073 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32* [[CONV]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
1074 // CHECK1-NEXT:    ret void
1075 //
1076 //
1077 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..2
1078 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
1079 // CHECK1-NEXT:  entry:
1080 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1081 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1082 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
1083 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
1084 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
1085 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
1086 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1087 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1088 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
1089 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
1090 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
1091 // CHECK1-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
1092 // CHECK1-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
1093 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1094 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1095 // CHECK1-NEXT:    [[I3:%.*]] = alloca i32, align 4
1096 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1097 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1098 // CHECK1-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
1099 // CHECK1-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
1100 // CHECK1-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
1101 // CHECK1-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
1102 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
1103 // CHECK1-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
1104 // CHECK1-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
1105 // CHECK1-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
1106 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
1107 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
1108 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1109 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
1110 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
1111 // CHECK1-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
1112 // CHECK1-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
1113 // CHECK1-NEXT:    store i32 0, i32* [[I]], align 4
1114 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1115 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
1116 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
1117 // CHECK1:       omp.precond.then:
1118 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
1119 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1120 // CHECK1-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
1121 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1122 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1123 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1124 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
1125 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1126 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1127 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1128 // CHECK1-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
1129 // CHECK1-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1130 // CHECK1:       cond.true:
1131 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1132 // CHECK1-NEXT:    br label [[COND_END:%.*]]
1133 // CHECK1:       cond.false:
1134 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1135 // CHECK1-NEXT:    br label [[COND_END]]
1136 // CHECK1:       cond.end:
1137 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
1138 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
1139 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
1140 // CHECK1-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
1141 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1142 // CHECK1:       omp.inner.for.cond:
1143 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
1144 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !19
1145 // CHECK1-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
1146 // CHECK1-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1147 // CHECK1:       omp.inner.for.body:
1148 // CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !19
1149 // CHECK1-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
1150 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !19
1151 // CHECK1-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
1152 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !19
1153 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1154 // CHECK1:       omp.inner.for.inc:
1155 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
1156 // CHECK1-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !19
1157 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
1158 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
1159 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]]
1160 // CHECK1:       omp.inner.for.end:
1161 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1162 // CHECK1:       omp.loop.exit:
1163 // CHECK1-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1164 // CHECK1-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
1165 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
1166 // CHECK1-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1167 // CHECK1-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
1168 // CHECK1-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1169 // CHECK1:       .omp.final.then:
1170 // CHECK1-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1171 // CHECK1-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
1172 // CHECK1-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
1173 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
1174 // CHECK1-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
1175 // CHECK1-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
1176 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1177 // CHECK1:       .omp.final.done:
1178 // CHECK1-NEXT:    br label [[OMP_PRECOND_END]]
1179 // CHECK1:       omp.precond.end:
1180 // CHECK1-NEXT:    ret void
1181 //
1182 //
1183 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..3
1184 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
1185 // CHECK1-NEXT:  entry:
1186 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1187 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1188 // CHECK1-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
1189 // CHECK1-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
1190 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
1191 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
1192 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
1193 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
1194 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1195 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1196 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
1197 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
1198 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
1199 // CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
1200 // CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
1201 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1202 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1203 // CHECK1-NEXT:    [[I4:%.*]] = alloca i32, align 4
1204 // CHECK1-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_1:%.*]], align 8
1205 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1206 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1207 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
1208 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
1209 // CHECK1-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
1210 // CHECK1-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
1211 // CHECK1-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
1212 // CHECK1-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
1213 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
1214 // CHECK1-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
1215 // CHECK1-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
1216 // CHECK1-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
1217 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
1218 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
1219 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1220 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
1221 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
1222 // CHECK1-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
1223 // CHECK1-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
1224 // CHECK1-NEXT:    store i32 0, i32* [[I]], align 4
1225 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1226 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
1227 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
1228 // CHECK1:       omp.precond.then:
1229 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
1230 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1231 // CHECK1-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
1232 // CHECK1-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
1233 // CHECK1-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
1234 // CHECK1-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
1235 // CHECK1-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
1236 // CHECK1-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
1237 // CHECK1-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
1238 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1239 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1240 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1241 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
1242 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1243 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1244 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1245 // CHECK1-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
1246 // CHECK1-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1247 // CHECK1:       cond.true:
1248 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1249 // CHECK1-NEXT:    br label [[COND_END:%.*]]
1250 // CHECK1:       cond.false:
1251 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1252 // CHECK1-NEXT:    br label [[COND_END]]
1253 // CHECK1:       cond.end:
1254 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
1255 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
1256 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
1257 // CHECK1-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
1258 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1259 // CHECK1:       omp.inner.for.cond:
1260 // CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
1261 // CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !22
1262 // CHECK1-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
1263 // CHECK1-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1264 // CHECK1:       omp.inner.for.body:
1265 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
1266 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
1267 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1268 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !22
1269 // CHECK1-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !22
1270 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !22
1271 // CHECK1-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
1272 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i64 [[IDXPROM]]
1273 // CHECK1-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !22
1274 // CHECK1-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !22
1275 // CHECK1-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !22
1276 // CHECK1-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
1277 // CHECK1-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM7]]
1278 // CHECK1-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX8]], align 8, !llvm.access.group !22
1279 // CHECK1-NEXT:    [[ADD9:%.*]] = fadd double [[TMP22]], [[TMP25]]
1280 // CHECK1-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !22
1281 // CHECK1-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !22
1282 // CHECK1-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
1283 // CHECK1-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM10]]
1284 // CHECK1-NEXT:    store double [[ADD9]], double* [[ARRAYIDX11]], align 8, !llvm.access.group !22
1285 // CHECK1-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 0
1286 // CHECK1-NEXT:    store double** [[TMP1]], double*** [[TMP28]], align 8, !llvm.access.group !22
1287 // CHECK1-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 1
1288 // CHECK1-NEXT:    store i32* [[I4]], i32** [[TMP29]], align 8, !llvm.access.group !22
1289 // CHECK1-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 2
1290 // CHECK1-NEXT:    store double** [[TMP2]], double*** [[TMP30]], align 8, !llvm.access.group !22
1291 // CHECK1-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 3
1292 // CHECK1-NEXT:    store double** [[TMP3]], double*** [[TMP31]], align 8, !llvm.access.group !22
1293 // CHECK1-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE0_clEv"(%class.anon.1* nonnull align 8 dereferenceable(32) [[REF_TMP]]), !llvm.access.group !22
1294 // CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
1295 // CHECK1:       omp.body.continue:
1296 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1297 // CHECK1:       omp.inner.for.inc:
1298 // CHECK1-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
1299 // CHECK1-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP32]], 1
1300 // CHECK1-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
1301 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]]
1302 // CHECK1:       omp.inner.for.end:
1303 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1304 // CHECK1:       omp.loop.exit:
1305 // CHECK1-NEXT:    [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1306 // CHECK1-NEXT:    [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
1307 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
1308 // CHECK1-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1309 // CHECK1-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
1310 // CHECK1-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1311 // CHECK1:       .omp.final.then:
1312 // CHECK1-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1313 // CHECK1-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP37]], 0
1314 // CHECK1-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
1315 // CHECK1-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
1316 // CHECK1-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
1317 // CHECK1-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
1318 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1319 // CHECK1:       .omp.final.done:
1320 // CHECK1-NEXT:    br label [[OMP_PRECOND_END]]
1321 // CHECK1:       omp.precond.end:
1322 // CHECK1-NEXT:    ret void
1323 //
1324 //
1325 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l201
1326 // CHECK1-SAME: (i64 [[CH:%.*]], i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2]] {
1327 // CHECK1-NEXT:  entry:
1328 // CHECK1-NEXT:    [[CH_ADDR:%.*]] = alloca i64, align 8
1329 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
1330 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
1331 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
1332 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
1333 // CHECK1-NEXT:    store i64 [[CH]], i64* [[CH_ADDR]], align 8
1334 // CHECK1-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
1335 // CHECK1-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
1336 // CHECK1-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
1337 // CHECK1-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
1338 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
1339 // CHECK1-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
1340 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
1341 // CHECK1-NEXT:    ret void
1342 //
1343 //
1344 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..6
1345 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
1346 // CHECK1-NEXT:  entry:
1347 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1348 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1349 // CHECK1-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 8
1350 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
1351 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
1352 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
1353 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
1354 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1355 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1356 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
1357 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
1358 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
1359 // CHECK1-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
1360 // CHECK1-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
1361 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1362 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1363 // CHECK1-NEXT:    [[I3:%.*]] = alloca i32, align 4
1364 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1365 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1366 // CHECK1-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 8
1367 // CHECK1-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
1368 // CHECK1-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
1369 // CHECK1-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
1370 // CHECK1-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
1371 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
1372 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
1373 // CHECK1-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 8
1374 // CHECK1-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 8
1375 // CHECK1-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 8
1376 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP1]], align 4
1377 // CHECK1-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
1378 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1379 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP6]], 0
1380 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
1381 // CHECK1-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
1382 // CHECK1-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
1383 // CHECK1-NEXT:    store i32 0, i32* [[I]], align 4
1384 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1385 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP7]]
1386 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
1387 // CHECK1:       omp.precond.then:
1388 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
1389 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1390 // CHECK1-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_COMB_UB]], align 4
1391 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1392 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1393 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP0]], align 4
1394 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1395 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
1396 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP9]])
1397 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1398 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1399 // CHECK1-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
1400 // CHECK1-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1401 // CHECK1:       cond.true:
1402 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1403 // CHECK1-NEXT:    br label [[COND_END:%.*]]
1404 // CHECK1:       cond.false:
1405 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1406 // CHECK1-NEXT:    br label [[COND_END]]
1407 // CHECK1:       cond.end:
1408 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
1409 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
1410 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
1411 // CHECK1-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
1412 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1413 // CHECK1:       omp.inner.for.cond:
1414 // CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
1415 // CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !25
1416 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], 1
1417 // CHECK1-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP17]], [[ADD]]
1418 // CHECK1-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1419 // CHECK1:       omp.inner.for.body:
1420 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !25
1421 // CHECK1-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
1422 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !25
1423 // CHECK1-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
1424 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]]), !llvm.access.group !25
1425 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1426 // CHECK1:       omp.inner.for.inc:
1427 // CHECK1-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
1428 // CHECK1-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !25
1429 // CHECK1-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
1430 // CHECK1-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
1431 // CHECK1-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !25
1432 // CHECK1-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !25
1433 // CHECK1-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
1434 // CHECK1-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !25
1435 // CHECK1-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !25
1436 // CHECK1-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !25
1437 // CHECK1-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP27]], [[TMP28]]
1438 // CHECK1-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !25
1439 // CHECK1-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !25
1440 // CHECK1-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !25
1441 // CHECK1-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[TMP29]], [[TMP30]]
1442 // CHECK1-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
1443 // CHECK1:       cond.true10:
1444 // CHECK1-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !25
1445 // CHECK1-NEXT:    br label [[COND_END12:%.*]]
1446 // CHECK1:       cond.false11:
1447 // CHECK1-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !25
1448 // CHECK1-NEXT:    br label [[COND_END12]]
1449 // CHECK1:       cond.end12:
1450 // CHECK1-NEXT:    [[COND13:%.*]] = phi i32 [ [[TMP31]], [[COND_TRUE10]] ], [ [[TMP32]], [[COND_FALSE11]] ]
1451 // CHECK1-NEXT:    store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !25
1452 // CHECK1-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !25
1453 // CHECK1-NEXT:    store i32 [[TMP33]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
1454 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
1455 // CHECK1:       omp.inner.for.end:
1456 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1457 // CHECK1:       omp.loop.exit:
1458 // CHECK1-NEXT:    [[TMP34:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1459 // CHECK1-NEXT:    [[TMP35:%.*]] = load i32, i32* [[TMP34]], align 4
1460 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP35]])
1461 // CHECK1-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1462 // CHECK1-NEXT:    [[TMP37:%.*]] = icmp ne i32 [[TMP36]], 0
1463 // CHECK1-NEXT:    br i1 [[TMP37]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1464 // CHECK1:       .omp.final.then:
1465 // CHECK1-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1466 // CHECK1-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP38]], 0
1467 // CHECK1-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
1468 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV15]], 1
1469 // CHECK1-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL]]
1470 // CHECK1-NEXT:    store i32 [[ADD16]], i32* [[I3]], align 4
1471 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1472 // CHECK1:       .omp.final.done:
1473 // CHECK1-NEXT:    br label [[OMP_PRECOND_END]]
1474 // CHECK1:       omp.precond.end:
1475 // CHECK1-NEXT:    ret void
1476 //
1477 //
1478 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..7
1479 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
1480 // CHECK1-NEXT:  entry:
1481 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1482 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1483 // CHECK1-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
1484 // CHECK1-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
1485 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
1486 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
1487 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
1488 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
1489 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1490 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1491 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
1492 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
1493 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
1494 // CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
1495 // CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
1496 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1497 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1498 // CHECK1-NEXT:    [[I4:%.*]] = alloca i32, align 4
1499 // CHECK1-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_2:%.*]], align 8
1500 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1501 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1502 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
1503 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
1504 // CHECK1-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
1505 // CHECK1-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
1506 // CHECK1-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
1507 // CHECK1-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
1508 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
1509 // CHECK1-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
1510 // CHECK1-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
1511 // CHECK1-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
1512 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
1513 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
1514 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1515 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
1516 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
1517 // CHECK1-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
1518 // CHECK1-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
1519 // CHECK1-NEXT:    store i32 0, i32* [[I]], align 4
1520 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1521 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
1522 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
1523 // CHECK1:       omp.precond.then:
1524 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
1525 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1526 // CHECK1-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
1527 // CHECK1-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
1528 // CHECK1-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
1529 // CHECK1-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
1530 // CHECK1-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
1531 // CHECK1-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
1532 // CHECK1-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
1533 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1534 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1535 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1536 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
1537 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1538 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1539 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1540 // CHECK1-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
1541 // CHECK1-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1542 // CHECK1:       cond.true:
1543 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1544 // CHECK1-NEXT:    br label [[COND_END:%.*]]
1545 // CHECK1:       cond.false:
1546 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1547 // CHECK1-NEXT:    br label [[COND_END]]
1548 // CHECK1:       cond.end:
1549 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
1550 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
1551 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
1552 // CHECK1-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
1553 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1554 // CHECK1:       omp.inner.for.cond:
1555 // CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
1556 // CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !28
1557 // CHECK1-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
1558 // CHECK1-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1559 // CHECK1:       omp.inner.for.body:
1560 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
1561 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
1562 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1563 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !28
1564 // CHECK1-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !28
1565 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !28
1566 // CHECK1-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
1567 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i64 [[IDXPROM]]
1568 // CHECK1-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !28
1569 // CHECK1-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !28
1570 // CHECK1-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !28
1571 // CHECK1-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
1572 // CHECK1-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM7]]
1573 // CHECK1-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX8]], align 8, !llvm.access.group !28
1574 // CHECK1-NEXT:    [[ADD9:%.*]] = fadd double [[TMP22]], [[TMP25]]
1575 // CHECK1-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !28
1576 // CHECK1-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !28
1577 // CHECK1-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
1578 // CHECK1-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM10]]
1579 // CHECK1-NEXT:    store double [[ADD9]], double* [[ARRAYIDX11]], align 8, !llvm.access.group !28
1580 // CHECK1-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[CLASS_ANON_2]], %class.anon.2* [[REF_TMP]], i32 0, i32 0
1581 // CHECK1-NEXT:    store double** [[TMP1]], double*** [[TMP28]], align 8, !llvm.access.group !28
1582 // CHECK1-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_2]], %class.anon.2* [[REF_TMP]], i32 0, i32 1
1583 // CHECK1-NEXT:    store i32* [[I4]], i32** [[TMP29]], align 8, !llvm.access.group !28
1584 // CHECK1-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_2]], %class.anon.2* [[REF_TMP]], i32 0, i32 2
1585 // CHECK1-NEXT:    store double** [[TMP2]], double*** [[TMP30]], align 8, !llvm.access.group !28
1586 // CHECK1-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_2]], %class.anon.2* [[REF_TMP]], i32 0, i32 3
1587 // CHECK1-NEXT:    store double** [[TMP3]], double*** [[TMP31]], align 8, !llvm.access.group !28
1588 // CHECK1-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE1_clEv"(%class.anon.2* nonnull align 8 dereferenceable(32) [[REF_TMP]]), !llvm.access.group !28
1589 // CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
1590 // CHECK1:       omp.body.continue:
1591 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1592 // CHECK1:       omp.inner.for.inc:
1593 // CHECK1-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
1594 // CHECK1-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP32]], 1
1595 // CHECK1-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
1596 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]]
1597 // CHECK1:       omp.inner.for.end:
1598 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1599 // CHECK1:       omp.loop.exit:
1600 // CHECK1-NEXT:    [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1601 // CHECK1-NEXT:    [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
1602 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
1603 // CHECK1-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1604 // CHECK1-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
1605 // CHECK1-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1606 // CHECK1:       .omp.final.then:
1607 // CHECK1-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1608 // CHECK1-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP37]], 0
1609 // CHECK1-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
1610 // CHECK1-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
1611 // CHECK1-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
1612 // CHECK1-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
1613 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1614 // CHECK1:       .omp.final.done:
1615 // CHECK1-NEXT:    br label [[OMP_PRECOND_END]]
1616 // CHECK1:       omp.precond.end:
1617 // CHECK1-NEXT:    ret void
1618 //
1619 //
1620 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l234
1621 // CHECK1-SAME: (i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2]] {
1622 // CHECK1-NEXT:  entry:
1623 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
1624 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
1625 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
1626 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
1627 // CHECK1-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
1628 // CHECK1-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
1629 // CHECK1-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
1630 // CHECK1-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
1631 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
1632 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32* [[CONV]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
1633 // CHECK1-NEXT:    ret void
1634 //
1635 //
1636 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..10
1637 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
1638 // CHECK1-NEXT:  entry:
1639 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1640 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1641 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
1642 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
1643 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
1644 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
1645 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1646 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1647 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
1648 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
1649 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
1650 // CHECK1-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
1651 // CHECK1-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
1652 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1653 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1654 // CHECK1-NEXT:    [[I3:%.*]] = alloca i32, align 4
1655 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1656 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1657 // CHECK1-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
1658 // CHECK1-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
1659 // CHECK1-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
1660 // CHECK1-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
1661 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
1662 // CHECK1-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
1663 // CHECK1-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
1664 // CHECK1-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
1665 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
1666 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
1667 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1668 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
1669 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
1670 // CHECK1-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
1671 // CHECK1-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
1672 // CHECK1-NEXT:    store i32 0, i32* [[I]], align 4
1673 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1674 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
1675 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
1676 // CHECK1:       omp.precond.then:
1677 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
1678 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1679 // CHECK1-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
1680 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1681 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1682 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1683 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
1684 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1685 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1686 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1687 // CHECK1-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
1688 // CHECK1-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1689 // CHECK1:       cond.true:
1690 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1691 // CHECK1-NEXT:    br label [[COND_END:%.*]]
1692 // CHECK1:       cond.false:
1693 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1694 // CHECK1-NEXT:    br label [[COND_END]]
1695 // CHECK1:       cond.end:
1696 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
1697 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
1698 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
1699 // CHECK1-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
1700 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1701 // CHECK1:       omp.inner.for.cond:
1702 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
1703 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !31
1704 // CHECK1-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
1705 // CHECK1-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1706 // CHECK1:       omp.inner.for.body:
1707 // CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !31
1708 // CHECK1-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
1709 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !31
1710 // CHECK1-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
1711 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !31
1712 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1713 // CHECK1:       omp.inner.for.inc:
1714 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
1715 // CHECK1-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !31
1716 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
1717 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
1718 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP32:![0-9]+]]
1719 // CHECK1:       omp.inner.for.end:
1720 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1721 // CHECK1:       omp.loop.exit:
1722 // CHECK1-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1723 // CHECK1-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
1724 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
1725 // CHECK1-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1726 // CHECK1-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
1727 // CHECK1-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1728 // CHECK1:       .omp.final.then:
1729 // CHECK1-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1730 // CHECK1-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
1731 // CHECK1-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
1732 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
1733 // CHECK1-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
1734 // CHECK1-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
1735 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1736 // CHECK1:       .omp.final.done:
1737 // CHECK1-NEXT:    br label [[OMP_PRECOND_END]]
1738 // CHECK1:       omp.precond.end:
1739 // CHECK1-NEXT:    ret void
1740 //
1741 //
1742 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..11
1743 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
1744 // CHECK1-NEXT:  entry:
1745 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1746 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1747 // CHECK1-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
1748 // CHECK1-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
1749 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
1750 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
1751 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
1752 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
1753 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1754 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1755 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
1756 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
1757 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
1758 // CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
1759 // CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
1760 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1761 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1762 // CHECK1-NEXT:    [[I4:%.*]] = alloca i32, align 4
1763 // CHECK1-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_3:%.*]], align 8
1764 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1765 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1766 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
1767 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
1768 // CHECK1-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
1769 // CHECK1-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
1770 // CHECK1-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
1771 // CHECK1-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
1772 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
1773 // CHECK1-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
1774 // CHECK1-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
1775 // CHECK1-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
1776 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
1777 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
1778 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1779 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
1780 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
1781 // CHECK1-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
1782 // CHECK1-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
1783 // CHECK1-NEXT:    store i32 0, i32* [[I]], align 4
1784 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1785 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
1786 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
1787 // CHECK1:       omp.precond.then:
1788 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
1789 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1790 // CHECK1-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
1791 // CHECK1-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
1792 // CHECK1-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
1793 // CHECK1-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
1794 // CHECK1-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
1795 // CHECK1-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
1796 // CHECK1-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
1797 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1798 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1799 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1800 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
1801 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1802 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1803 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1804 // CHECK1-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
1805 // CHECK1-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1806 // CHECK1:       cond.true:
1807 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1808 // CHECK1-NEXT:    br label [[COND_END:%.*]]
1809 // CHECK1:       cond.false:
1810 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1811 // CHECK1-NEXT:    br label [[COND_END]]
1812 // CHECK1:       cond.end:
1813 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
1814 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
1815 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
1816 // CHECK1-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
1817 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1818 // CHECK1:       omp.inner.for.cond:
1819 // CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
1820 // CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !34
1821 // CHECK1-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
1822 // CHECK1-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1823 // CHECK1:       omp.inner.for.body:
1824 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
1825 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
1826 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1827 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !34
1828 // CHECK1-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !34
1829 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !34
1830 // CHECK1-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
1831 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i64 [[IDXPROM]]
1832 // CHECK1-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !34
1833 // CHECK1-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !34
1834 // CHECK1-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !34
1835 // CHECK1-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
1836 // CHECK1-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM7]]
1837 // CHECK1-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX8]], align 8, !llvm.access.group !34
1838 // CHECK1-NEXT:    [[ADD9:%.*]] = fadd double [[TMP22]], [[TMP25]]
1839 // CHECK1-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !34
1840 // CHECK1-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !34
1841 // CHECK1-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
1842 // CHECK1-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM10]]
1843 // CHECK1-NEXT:    store double [[ADD9]], double* [[ARRAYIDX11]], align 8, !llvm.access.group !34
1844 // CHECK1-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[CLASS_ANON_3]], %class.anon.3* [[REF_TMP]], i32 0, i32 0
1845 // CHECK1-NEXT:    store double** [[TMP1]], double*** [[TMP28]], align 8, !llvm.access.group !34
1846 // CHECK1-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_3]], %class.anon.3* [[REF_TMP]], i32 0, i32 1
1847 // CHECK1-NEXT:    store i32* [[I4]], i32** [[TMP29]], align 8, !llvm.access.group !34
1848 // CHECK1-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_3]], %class.anon.3* [[REF_TMP]], i32 0, i32 2
1849 // CHECK1-NEXT:    store double** [[TMP2]], double*** [[TMP30]], align 8, !llvm.access.group !34
1850 // CHECK1-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_3]], %class.anon.3* [[REF_TMP]], i32 0, i32 3
1851 // CHECK1-NEXT:    store double** [[TMP3]], double*** [[TMP31]], align 8, !llvm.access.group !34
1852 // CHECK1-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE2_clEv"(%class.anon.3* nonnull align 8 dereferenceable(32) [[REF_TMP]]), !llvm.access.group !34
1853 // CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
1854 // CHECK1:       omp.body.continue:
1855 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1856 // CHECK1:       omp.inner.for.inc:
1857 // CHECK1-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
1858 // CHECK1-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP32]], 1
1859 // CHECK1-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
1860 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP35:![0-9]+]]
1861 // CHECK1:       omp.inner.for.end:
1862 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1863 // CHECK1:       omp.loop.exit:
1864 // CHECK1-NEXT:    [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1865 // CHECK1-NEXT:    [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
1866 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
1867 // CHECK1-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1868 // CHECK1-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
1869 // CHECK1-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1870 // CHECK1:       .omp.final.then:
1871 // CHECK1-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1872 // CHECK1-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP37]], 0
1873 // CHECK1-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
1874 // CHECK1-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
1875 // CHECK1-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
1876 // CHECK1-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
1877 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1878 // CHECK1:       .omp.final.done:
1879 // CHECK1-NEXT:    br label [[OMP_PRECOND_END]]
1880 // CHECK1:       omp.precond.end:
1881 // CHECK1-NEXT:    ret void
1882 //
1883 //
1884 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l266
1885 // CHECK1-SAME: (i64 [[CH:%.*]], i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2]] {
1886 // CHECK1-NEXT:  entry:
1887 // CHECK1-NEXT:    [[CH_ADDR:%.*]] = alloca i64, align 8
1888 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
1889 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
1890 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
1891 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
1892 // CHECK1-NEXT:    store i64 [[CH]], i64* [[CH_ADDR]], align 8
1893 // CHECK1-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
1894 // CHECK1-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
1895 // CHECK1-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
1896 // CHECK1-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
1897 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
1898 // CHECK1-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
1899 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
1900 // CHECK1-NEXT:    ret void
1901 //
1902 //
1903 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..14
1904 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
1905 // CHECK1-NEXT:  entry:
1906 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1907 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1908 // CHECK1-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 8
1909 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
1910 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
1911 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
1912 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
1913 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
1914 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1915 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1916 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
1917 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
1918 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
1919 // CHECK1-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
1920 // CHECK1-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
1921 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1922 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1923 // CHECK1-NEXT:    [[I4:%.*]] = alloca i32, align 4
1924 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
1925 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1926 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1927 // CHECK1-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 8
1928 // CHECK1-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
1929 // CHECK1-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
1930 // CHECK1-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
1931 // CHECK1-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
1932 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
1933 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
1934 // CHECK1-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 8
1935 // CHECK1-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 8
1936 // CHECK1-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 8
1937 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
1938 // CHECK1-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
1939 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
1940 // CHECK1-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
1941 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1942 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
1943 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
1944 // CHECK1-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
1945 // CHECK1-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
1946 // CHECK1-NEXT:    store i32 0, i32* [[I]], align 4
1947 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1948 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
1949 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
1950 // CHECK1:       omp.precond.then:
1951 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
1952 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
1953 // CHECK1-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
1954 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1955 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1956 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1957 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
1958 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1959 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1960 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
1961 // CHECK1-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
1962 // CHECK1-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1963 // CHECK1:       cond.true:
1964 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
1965 // CHECK1-NEXT:    br label [[COND_END:%.*]]
1966 // CHECK1:       cond.false:
1967 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1968 // CHECK1-NEXT:    br label [[COND_END]]
1969 // CHECK1:       cond.end:
1970 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
1971 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
1972 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
1973 // CHECK1-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
1974 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1975 // CHECK1:       omp.inner.for.cond:
1976 // CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !37
1977 // CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !37
1978 // CHECK1-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
1979 // CHECK1-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1980 // CHECK1:       omp.inner.for.body:
1981 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !37
1982 // CHECK1-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
1983 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !37
1984 // CHECK1-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
1985 // CHECK1-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !37
1986 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
1987 // CHECK1-NEXT:    store i32 [[TMP23]], i32* [[CONV]], align 4, !llvm.access.group !37
1988 // CHECK1-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !37
1989 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**, i64)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]], i64 [[TMP24]]), !llvm.access.group !37
1990 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1991 // CHECK1:       omp.inner.for.inc:
1992 // CHECK1-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !37
1993 // CHECK1-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !37
1994 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
1995 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !37
1996 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP38:![0-9]+]]
1997 // CHECK1:       omp.inner.for.end:
1998 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1999 // CHECK1:       omp.loop.exit:
2000 // CHECK1-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2001 // CHECK1-NEXT:    [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
2002 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP28]])
2003 // CHECK1-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2004 // CHECK1-NEXT:    [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
2005 // CHECK1-NEXT:    br i1 [[TMP30]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2006 // CHECK1:       .omp.final.then:
2007 // CHECK1-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2008 // CHECK1-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP31]], 0
2009 // CHECK1-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
2010 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
2011 // CHECK1-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
2012 // CHECK1-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
2013 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2014 // CHECK1:       .omp.final.done:
2015 // CHECK1-NEXT:    br label [[OMP_PRECOND_END]]
2016 // CHECK1:       omp.precond.end:
2017 // CHECK1-NEXT:    ret void
2018 //
2019 //
2020 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..15
2021 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
2022 // CHECK1-NEXT:  entry:
2023 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2024 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2025 // CHECK1-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
2026 // CHECK1-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
2027 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
2028 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
2029 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
2030 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
2031 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
2032 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2033 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2034 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
2035 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
2036 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
2037 // CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
2038 // CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
2039 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2040 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2041 // CHECK1-NEXT:    [[I6:%.*]] = alloca i32, align 4
2042 // CHECK1-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_4:%.*]], align 8
2043 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2044 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2045 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
2046 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
2047 // CHECK1-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
2048 // CHECK1-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
2049 // CHECK1-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
2050 // CHECK1-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
2051 // CHECK1-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
2052 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
2053 // CHECK1-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
2054 // CHECK1-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
2055 // CHECK1-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
2056 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
2057 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
2058 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
2059 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2060 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
2061 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
2062 // CHECK1-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
2063 // CHECK1-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
2064 // CHECK1-NEXT:    store i32 0, i32* [[I]], align 4
2065 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2066 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
2067 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
2068 // CHECK1:       omp.precond.then:
2069 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
2070 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
2071 // CHECK1-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
2072 // CHECK1-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
2073 // CHECK1-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
2074 // CHECK1-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
2075 // CHECK1-NEXT:    [[CONV5:%.*]] = trunc i64 [[TMP9]] to i32
2076 // CHECK1-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_LB]], align 4
2077 // CHECK1-NEXT:    store i32 [[CONV5]], i32* [[DOTOMP_UB]], align 4
2078 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2079 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2080 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[CONV]], align 8
2081 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2082 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
2083 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP12]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP10]])
2084 // CHECK1-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
2085 // CHECK1:       omp.dispatch.cond:
2086 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2087 // CHECK1-NEXT:    [[CONV7:%.*]] = sext i32 [[TMP13]] to i64
2088 // CHECK1-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
2089 // CHECK1-NEXT:    [[CMP8:%.*]] = icmp ugt i64 [[CONV7]], [[TMP14]]
2090 // CHECK1-NEXT:    br i1 [[CMP8]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2091 // CHECK1:       cond.true:
2092 // CHECK1-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
2093 // CHECK1-NEXT:    br label [[COND_END:%.*]]
2094 // CHECK1:       cond.false:
2095 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2096 // CHECK1-NEXT:    [[CONV9:%.*]] = sext i32 [[TMP16]] to i64
2097 // CHECK1-NEXT:    br label [[COND_END]]
2098 // CHECK1:       cond.end:
2099 // CHECK1-NEXT:    [[COND:%.*]] = phi i64 [ [[TMP15]], [[COND_TRUE]] ], [ [[CONV9]], [[COND_FALSE]] ]
2100 // CHECK1-NEXT:    [[CONV10:%.*]] = trunc i64 [[COND]] to i32
2101 // CHECK1-NEXT:    store i32 [[CONV10]], i32* [[DOTOMP_UB]], align 4
2102 // CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2103 // CHECK1-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
2104 // CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2105 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2106 // CHECK1-NEXT:    [[CMP11:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
2107 // CHECK1-NEXT:    br i1 [[CMP11]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
2108 // CHECK1:       omp.dispatch.body:
2109 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2110 // CHECK1:       omp.inner.for.cond:
2111 // CHECK1-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40
2112 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !40
2113 // CHECK1-NEXT:    [[CMP12:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
2114 // CHECK1-NEXT:    br i1 [[CMP12]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2115 // CHECK1:       omp.inner.for.body:
2116 // CHECK1-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40
2117 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP22]], 1
2118 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2119 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[I6]], align 4, !llvm.access.group !40
2120 // CHECK1-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !40
2121 // CHECK1-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !40
2122 // CHECK1-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP24]] to i64
2123 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM]]
2124 // CHECK1-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !40
2125 // CHECK1-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !40
2126 // CHECK1-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !40
2127 // CHECK1-NEXT:    [[IDXPROM13:%.*]] = sext i32 [[TMP27]] to i64
2128 // CHECK1-NEXT:    [[ARRAYIDX14:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM13]]
2129 // CHECK1-NEXT:    [[TMP28:%.*]] = load double, double* [[ARRAYIDX14]], align 8, !llvm.access.group !40
2130 // CHECK1-NEXT:    [[ADD15:%.*]] = fadd double [[TMP25]], [[TMP28]]
2131 // CHECK1-NEXT:    [[TMP29:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !40
2132 // CHECK1-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !40
2133 // CHECK1-NEXT:    [[IDXPROM16:%.*]] = sext i32 [[TMP30]] to i64
2134 // CHECK1-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds double, double* [[TMP29]], i64 [[IDXPROM16]]
2135 // CHECK1-NEXT:    store double [[ADD15]], double* [[ARRAYIDX17]], align 8, !llvm.access.group !40
2136 // CHECK1-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_4]], %class.anon.4* [[REF_TMP]], i32 0, i32 0
2137 // CHECK1-NEXT:    store double** [[TMP1]], double*** [[TMP31]], align 8, !llvm.access.group !40
2138 // CHECK1-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[CLASS_ANON_4]], %class.anon.4* [[REF_TMP]], i32 0, i32 1
2139 // CHECK1-NEXT:    store i32* [[I6]], i32** [[TMP32]], align 8, !llvm.access.group !40
2140 // CHECK1-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [[CLASS_ANON_4]], %class.anon.4* [[REF_TMP]], i32 0, i32 2
2141 // CHECK1-NEXT:    store double** [[TMP2]], double*** [[TMP33]], align 8, !llvm.access.group !40
2142 // CHECK1-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [[CLASS_ANON_4]], %class.anon.4* [[REF_TMP]], i32 0, i32 3
2143 // CHECK1-NEXT:    store double** [[TMP3]], double*** [[TMP34]], align 8, !llvm.access.group !40
2144 // CHECK1-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE3_clEv"(%class.anon.4* nonnull align 8 dereferenceable(32) [[REF_TMP]]), !llvm.access.group !40
2145 // CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
2146 // CHECK1:       omp.body.continue:
2147 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2148 // CHECK1:       omp.inner.for.inc:
2149 // CHECK1-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40
2150 // CHECK1-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP35]], 1
2151 // CHECK1-NEXT:    store i32 [[ADD18]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40
2152 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP41:![0-9]+]]
2153 // CHECK1:       omp.inner.for.end:
2154 // CHECK1-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
2155 // CHECK1:       omp.dispatch.inc:
2156 // CHECK1-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2157 // CHECK1-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
2158 // CHECK1-NEXT:    [[ADD19:%.*]] = add nsw i32 [[TMP36]], [[TMP37]]
2159 // CHECK1-NEXT:    store i32 [[ADD19]], i32* [[DOTOMP_LB]], align 4
2160 // CHECK1-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2161 // CHECK1-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
2162 // CHECK1-NEXT:    [[ADD20:%.*]] = add nsw i32 [[TMP38]], [[TMP39]]
2163 // CHECK1-NEXT:    store i32 [[ADD20]], i32* [[DOTOMP_UB]], align 4
2164 // CHECK1-NEXT:    br label [[OMP_DISPATCH_COND]]
2165 // CHECK1:       omp.dispatch.end:
2166 // CHECK1-NEXT:    [[TMP40:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2167 // CHECK1-NEXT:    [[TMP41:%.*]] = load i32, i32* [[TMP40]], align 4
2168 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP41]])
2169 // CHECK1-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2170 // CHECK1-NEXT:    [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0
2171 // CHECK1-NEXT:    br i1 [[TMP43]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2172 // CHECK1:       .omp.final.then:
2173 // CHECK1-NEXT:    [[TMP44:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2174 // CHECK1-NEXT:    [[SUB21:%.*]] = sub nsw i32 [[TMP44]], 0
2175 // CHECK1-NEXT:    [[DIV22:%.*]] = sdiv i32 [[SUB21]], 1
2176 // CHECK1-NEXT:    [[MUL23:%.*]] = mul nsw i32 [[DIV22]], 1
2177 // CHECK1-NEXT:    [[ADD24:%.*]] = add nsw i32 0, [[MUL23]]
2178 // CHECK1-NEXT:    store i32 [[ADD24]], i32* [[I6]], align 4
2179 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2180 // CHECK1:       .omp.final.done:
2181 // CHECK1-NEXT:    br label [[OMP_PRECOND_END]]
2182 // CHECK1:       omp.precond.end:
2183 // CHECK1-NEXT:    ret void
2184 //
2185 //
2186 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l299
2187 // CHECK1-SAME: (i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2]] {
2188 // CHECK1-NEXT:  entry:
2189 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
2190 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
2191 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
2192 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
2193 // CHECK1-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
2194 // CHECK1-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
2195 // CHECK1-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
2196 // CHECK1-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
2197 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
2198 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32* [[CONV]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
2199 // CHECK1-NEXT:    ret void
2200 //
2201 //
2202 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..18
2203 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
2204 // CHECK1-NEXT:  entry:
2205 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2206 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2207 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
2208 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
2209 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
2210 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
2211 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2212 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2213 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
2214 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
2215 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
2216 // CHECK1-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
2217 // CHECK1-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
2218 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2219 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2220 // CHECK1-NEXT:    [[I3:%.*]] = alloca i32, align 4
2221 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2222 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2223 // CHECK1-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
2224 // CHECK1-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
2225 // CHECK1-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
2226 // CHECK1-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
2227 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
2228 // CHECK1-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
2229 // CHECK1-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
2230 // CHECK1-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
2231 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
2232 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
2233 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2234 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
2235 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
2236 // CHECK1-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
2237 // CHECK1-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
2238 // CHECK1-NEXT:    store i32 0, i32* [[I]], align 4
2239 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2240 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
2241 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
2242 // CHECK1:       omp.precond.then:
2243 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
2244 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2245 // CHECK1-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
2246 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2247 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2248 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2249 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
2250 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2251 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2252 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2253 // CHECK1-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
2254 // CHECK1-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2255 // CHECK1:       cond.true:
2256 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2257 // CHECK1-NEXT:    br label [[COND_END:%.*]]
2258 // CHECK1:       cond.false:
2259 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2260 // CHECK1-NEXT:    br label [[COND_END]]
2261 // CHECK1:       cond.end:
2262 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
2263 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
2264 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
2265 // CHECK1-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
2266 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2267 // CHECK1:       omp.inner.for.cond:
2268 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43
2269 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !43
2270 // CHECK1-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
2271 // CHECK1-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2272 // CHECK1:       omp.inner.for.body:
2273 // CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !43
2274 // CHECK1-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
2275 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !43
2276 // CHECK1-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
2277 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !43
2278 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2279 // CHECK1:       omp.inner.for.inc:
2280 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43
2281 // CHECK1-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !43
2282 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
2283 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43
2284 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP44:![0-9]+]]
2285 // CHECK1:       omp.inner.for.end:
2286 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
2287 // CHECK1:       omp.loop.exit:
2288 // CHECK1-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2289 // CHECK1-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
2290 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
2291 // CHECK1-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2292 // CHECK1-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
2293 // CHECK1-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2294 // CHECK1:       .omp.final.then:
2295 // CHECK1-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2296 // CHECK1-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
2297 // CHECK1-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
2298 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
2299 // CHECK1-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
2300 // CHECK1-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
2301 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2302 // CHECK1:       .omp.final.done:
2303 // CHECK1-NEXT:    br label [[OMP_PRECOND_END]]
2304 // CHECK1:       omp.precond.end:
2305 // CHECK1-NEXT:    ret void
2306 //
2307 //
2308 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..19
2309 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
2310 // CHECK1-NEXT:  entry:
2311 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2312 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2313 // CHECK1-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
2314 // CHECK1-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
2315 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
2316 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
2317 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
2318 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
2319 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2320 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2321 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
2322 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
2323 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
2324 // CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
2325 // CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
2326 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2327 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2328 // CHECK1-NEXT:    [[I4:%.*]] = alloca i32, align 4
2329 // CHECK1-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_5:%.*]], align 8
2330 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2331 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2332 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
2333 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
2334 // CHECK1-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
2335 // CHECK1-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
2336 // CHECK1-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
2337 // CHECK1-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
2338 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
2339 // CHECK1-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
2340 // CHECK1-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
2341 // CHECK1-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
2342 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
2343 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
2344 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2345 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
2346 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
2347 // CHECK1-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
2348 // CHECK1-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
2349 // CHECK1-NEXT:    store i32 0, i32* [[I]], align 4
2350 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2351 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
2352 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
2353 // CHECK1:       omp.precond.then:
2354 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
2355 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2356 // CHECK1-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
2357 // CHECK1-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
2358 // CHECK1-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
2359 // CHECK1-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
2360 // CHECK1-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
2361 // CHECK1-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
2362 // CHECK1-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
2363 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2364 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2365 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2366 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2367 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2368 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
2369 // CHECK1-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32 35, i32 [[TMP10]], i32 [[TMP11]], i32 1, i32 1)
2370 // CHECK1-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
2371 // CHECK1:       omp.dispatch.cond:
2372 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2373 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4
2374 // CHECK1-NEXT:    [[TMP16:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP15]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
2375 // CHECK1-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP16]], 0
2376 // CHECK1-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
2377 // CHECK1:       omp.dispatch.body:
2378 // CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2379 // CHECK1-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
2380 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2381 // CHECK1:       omp.inner.for.cond:
2382 // CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !46
2383 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !46
2384 // CHECK1-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
2385 // CHECK1-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2386 // CHECK1:       omp.inner.for.body:
2387 // CHECK1-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !46
2388 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP20]], 1
2389 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2390 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !46
2391 // CHECK1-NEXT:    [[TMP21:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !46
2392 // CHECK1-NEXT:    [[TMP22:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !46
2393 // CHECK1-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP22]] to i64
2394 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP21]], i64 [[IDXPROM]]
2395 // CHECK1-NEXT:    [[TMP23:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !46
2396 // CHECK1-NEXT:    [[TMP24:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !46
2397 // CHECK1-NEXT:    [[TMP25:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !46
2398 // CHECK1-NEXT:    [[IDXPROM6:%.*]] = sext i32 [[TMP25]] to i64
2399 // CHECK1-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds double, double* [[TMP24]], i64 [[IDXPROM6]]
2400 // CHECK1-NEXT:    [[TMP26:%.*]] = load double, double* [[ARRAYIDX7]], align 8, !llvm.access.group !46
2401 // CHECK1-NEXT:    [[ADD8:%.*]] = fadd double [[TMP23]], [[TMP26]]
2402 // CHECK1-NEXT:    [[TMP27:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !46
2403 // CHECK1-NEXT:    [[TMP28:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !46
2404 // CHECK1-NEXT:    [[IDXPROM9:%.*]] = sext i32 [[TMP28]] to i64
2405 // CHECK1-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds double, double* [[TMP27]], i64 [[IDXPROM9]]
2406 // CHECK1-NEXT:    store double [[ADD8]], double* [[ARRAYIDX10]], align 8, !llvm.access.group !46
2407 // CHECK1-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_5]], %class.anon.5* [[REF_TMP]], i32 0, i32 0
2408 // CHECK1-NEXT:    store double** [[TMP1]], double*** [[TMP29]], align 8, !llvm.access.group !46
2409 // CHECK1-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_5]], %class.anon.5* [[REF_TMP]], i32 0, i32 1
2410 // CHECK1-NEXT:    store i32* [[I4]], i32** [[TMP30]], align 8, !llvm.access.group !46
2411 // CHECK1-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_5]], %class.anon.5* [[REF_TMP]], i32 0, i32 2
2412 // CHECK1-NEXT:    store double** [[TMP2]], double*** [[TMP31]], align 8, !llvm.access.group !46
2413 // CHECK1-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[CLASS_ANON_5]], %class.anon.5* [[REF_TMP]], i32 0, i32 3
2414 // CHECK1-NEXT:    store double** [[TMP3]], double*** [[TMP32]], align 8, !llvm.access.group !46
2415 // CHECK1-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE4_clEv"(%class.anon.5* nonnull align 8 dereferenceable(32) [[REF_TMP]]), !llvm.access.group !46
2416 // CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
2417 // CHECK1:       omp.body.continue:
2418 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2419 // CHECK1:       omp.inner.for.inc:
2420 // CHECK1-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !46
2421 // CHECK1-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP33]], 1
2422 // CHECK1-NEXT:    store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !46
2423 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP47:![0-9]+]]
2424 // CHECK1:       omp.inner.for.end:
2425 // CHECK1-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
2426 // CHECK1:       omp.dispatch.inc:
2427 // CHECK1-NEXT:    br label [[OMP_DISPATCH_COND]]
2428 // CHECK1:       omp.dispatch.end:
2429 // CHECK1-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2430 // CHECK1-NEXT:    [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
2431 // CHECK1-NEXT:    br i1 [[TMP35]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2432 // CHECK1:       .omp.final.then:
2433 // CHECK1-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2434 // CHECK1-NEXT:    [[SUB12:%.*]] = sub nsw i32 [[TMP36]], 0
2435 // CHECK1-NEXT:    [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1
2436 // CHECK1-NEXT:    [[MUL14:%.*]] = mul nsw i32 [[DIV13]], 1
2437 // CHECK1-NEXT:    [[ADD15:%.*]] = add nsw i32 0, [[MUL14]]
2438 // CHECK1-NEXT:    store i32 [[ADD15]], i32* [[I4]], align 4
2439 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2440 // CHECK1:       .omp.final.done:
2441 // CHECK1-NEXT:    br label [[OMP_PRECOND_END]]
2442 // CHECK1:       omp.precond.end:
2443 // CHECK1-NEXT:    ret void
2444 //
2445 //
2446 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l328
2447 // CHECK1-SAME: (i64 [[CH:%.*]], i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2]] {
2448 // CHECK1-NEXT:  entry:
2449 // CHECK1-NEXT:    [[CH_ADDR:%.*]] = alloca i64, align 8
2450 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
2451 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
2452 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
2453 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
2454 // CHECK1-NEXT:    store i64 [[CH]], i64* [[CH_ADDR]], align 8
2455 // CHECK1-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
2456 // CHECK1-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
2457 // CHECK1-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
2458 // CHECK1-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
2459 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
2460 // CHECK1-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
2461 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
2462 // CHECK1-NEXT:    ret void
2463 //
2464 //
2465 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..22
2466 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
2467 // CHECK1-NEXT:  entry:
2468 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2469 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2470 // CHECK1-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 8
2471 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
2472 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
2473 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
2474 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
2475 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
2476 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2477 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2478 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
2479 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
2480 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
2481 // CHECK1-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
2482 // CHECK1-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
2483 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2484 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2485 // CHECK1-NEXT:    [[I4:%.*]] = alloca i32, align 4
2486 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
2487 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2488 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2489 // CHECK1-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 8
2490 // CHECK1-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
2491 // CHECK1-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
2492 // CHECK1-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
2493 // CHECK1-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
2494 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
2495 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
2496 // CHECK1-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 8
2497 // CHECK1-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 8
2498 // CHECK1-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 8
2499 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
2500 // CHECK1-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
2501 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
2502 // CHECK1-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
2503 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2504 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
2505 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
2506 // CHECK1-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
2507 // CHECK1-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
2508 // CHECK1-NEXT:    store i32 0, i32* [[I]], align 4
2509 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2510 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
2511 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
2512 // CHECK1:       omp.precond.then:
2513 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
2514 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
2515 // CHECK1-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
2516 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2517 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2518 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2519 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
2520 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2521 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2522 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
2523 // CHECK1-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
2524 // CHECK1-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2525 // CHECK1:       cond.true:
2526 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
2527 // CHECK1-NEXT:    br label [[COND_END:%.*]]
2528 // CHECK1:       cond.false:
2529 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2530 // CHECK1-NEXT:    br label [[COND_END]]
2531 // CHECK1:       cond.end:
2532 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
2533 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
2534 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
2535 // CHECK1-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
2536 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2537 // CHECK1:       omp.inner.for.cond:
2538 // CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !49
2539 // CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !49
2540 // CHECK1-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
2541 // CHECK1-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2542 // CHECK1:       omp.inner.for.body:
2543 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !49
2544 // CHECK1-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
2545 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !49
2546 // CHECK1-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
2547 // CHECK1-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !49
2548 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
2549 // CHECK1-NEXT:    store i32 [[TMP23]], i32* [[CONV]], align 4, !llvm.access.group !49
2550 // CHECK1-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !49
2551 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**, i64)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]], i64 [[TMP24]]), !llvm.access.group !49
2552 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2553 // CHECK1:       omp.inner.for.inc:
2554 // CHECK1-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !49
2555 // CHECK1-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !49
2556 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
2557 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !49
2558 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP50:![0-9]+]]
2559 // CHECK1:       omp.inner.for.end:
2560 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
2561 // CHECK1:       omp.loop.exit:
2562 // CHECK1-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2563 // CHECK1-NEXT:    [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
2564 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP28]])
2565 // CHECK1-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2566 // CHECK1-NEXT:    [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
2567 // CHECK1-NEXT:    br i1 [[TMP30]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2568 // CHECK1:       .omp.final.then:
2569 // CHECK1-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2570 // CHECK1-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP31]], 0
2571 // CHECK1-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
2572 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
2573 // CHECK1-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
2574 // CHECK1-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
2575 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2576 // CHECK1:       .omp.final.done:
2577 // CHECK1-NEXT:    br label [[OMP_PRECOND_END]]
2578 // CHECK1:       omp.precond.end:
2579 // CHECK1-NEXT:    ret void
2580 //
2581 //
2582 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..23
2583 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
2584 // CHECK1-NEXT:  entry:
2585 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2586 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2587 // CHECK1-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
2588 // CHECK1-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
2589 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
2590 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
2591 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
2592 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
2593 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
2594 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2595 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2596 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
2597 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
2598 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
2599 // CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
2600 // CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
2601 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2602 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2603 // CHECK1-NEXT:    [[I6:%.*]] = alloca i32, align 4
2604 // CHECK1-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_6:%.*]], align 8
2605 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2606 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2607 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
2608 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
2609 // CHECK1-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
2610 // CHECK1-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
2611 // CHECK1-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
2612 // CHECK1-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
2613 // CHECK1-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
2614 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
2615 // CHECK1-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
2616 // CHECK1-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
2617 // CHECK1-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
2618 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
2619 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
2620 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
2621 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2622 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
2623 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
2624 // CHECK1-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
2625 // CHECK1-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
2626 // CHECK1-NEXT:    store i32 0, i32* [[I]], align 4
2627 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2628 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
2629 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
2630 // CHECK1:       omp.precond.then:
2631 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
2632 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
2633 // CHECK1-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
2634 // CHECK1-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
2635 // CHECK1-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
2636 // CHECK1-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
2637 // CHECK1-NEXT:    [[CONV5:%.*]] = trunc i64 [[TMP9]] to i32
2638 // CHECK1-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_LB]], align 4
2639 // CHECK1-NEXT:    store i32 [[CONV5]], i32* [[DOTOMP_UB]], align 4
2640 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2641 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2642 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[CONV]], align 8
2643 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2644 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2645 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2646 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
2647 // CHECK1-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32 35, i32 [[TMP11]], i32 [[TMP12]], i32 1, i32 [[TMP10]])
2648 // CHECK1-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
2649 // CHECK1:       omp.dispatch.cond:
2650 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2651 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4
2652 // CHECK1-NEXT:    [[TMP17:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP16]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
2653 // CHECK1-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP17]], 0
2654 // CHECK1-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
2655 // CHECK1:       omp.dispatch.body:
2656 // CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2657 // CHECK1-NEXT:    store i32 [[TMP18]], i32* [[DOTOMP_IV]], align 4
2658 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2659 // CHECK1:       omp.inner.for.cond:
2660 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !52
2661 // CHECK1-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !52
2662 // CHECK1-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
2663 // CHECK1-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2664 // CHECK1:       omp.inner.for.body:
2665 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !52
2666 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP21]], 1
2667 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2668 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[I6]], align 4, !llvm.access.group !52
2669 // CHECK1-NEXT:    [[TMP22:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !52
2670 // CHECK1-NEXT:    [[TMP23:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !52
2671 // CHECK1-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP23]] to i64
2672 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP22]], i64 [[IDXPROM]]
2673 // CHECK1-NEXT:    [[TMP24:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !52
2674 // CHECK1-NEXT:    [[TMP25:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !52
2675 // CHECK1-NEXT:    [[TMP26:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !52
2676 // CHECK1-NEXT:    [[IDXPROM8:%.*]] = sext i32 [[TMP26]] to i64
2677 // CHECK1-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds double, double* [[TMP25]], i64 [[IDXPROM8]]
2678 // CHECK1-NEXT:    [[TMP27:%.*]] = load double, double* [[ARRAYIDX9]], align 8, !llvm.access.group !52
2679 // CHECK1-NEXT:    [[ADD10:%.*]] = fadd double [[TMP24]], [[TMP27]]
2680 // CHECK1-NEXT:    [[TMP28:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !52
2681 // CHECK1-NEXT:    [[TMP29:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !52
2682 // CHECK1-NEXT:    [[IDXPROM11:%.*]] = sext i32 [[TMP29]] to i64
2683 // CHECK1-NEXT:    [[ARRAYIDX12:%.*]] = getelementptr inbounds double, double* [[TMP28]], i64 [[IDXPROM11]]
2684 // CHECK1-NEXT:    store double [[ADD10]], double* [[ARRAYIDX12]], align 8, !llvm.access.group !52
2685 // CHECK1-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_6]], %class.anon.6* [[REF_TMP]], i32 0, i32 0
2686 // CHECK1-NEXT:    store double** [[TMP1]], double*** [[TMP30]], align 8, !llvm.access.group !52
2687 // CHECK1-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_6]], %class.anon.6* [[REF_TMP]], i32 0, i32 1
2688 // CHECK1-NEXT:    store i32* [[I6]], i32** [[TMP31]], align 8, !llvm.access.group !52
2689 // CHECK1-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[CLASS_ANON_6]], %class.anon.6* [[REF_TMP]], i32 0, i32 2
2690 // CHECK1-NEXT:    store double** [[TMP2]], double*** [[TMP32]], align 8, !llvm.access.group !52
2691 // CHECK1-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [[CLASS_ANON_6]], %class.anon.6* [[REF_TMP]], i32 0, i32 3
2692 // CHECK1-NEXT:    store double** [[TMP3]], double*** [[TMP33]], align 8, !llvm.access.group !52
2693 // CHECK1-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE5_clEv"(%class.anon.6* nonnull align 8 dereferenceable(32) [[REF_TMP]]), !llvm.access.group !52
2694 // CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
2695 // CHECK1:       omp.body.continue:
2696 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2697 // CHECK1:       omp.inner.for.inc:
2698 // CHECK1-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !52
2699 // CHECK1-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP34]], 1
2700 // CHECK1-NEXT:    store i32 [[ADD13]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !52
2701 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP53:![0-9]+]]
2702 // CHECK1:       omp.inner.for.end:
2703 // CHECK1-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
2704 // CHECK1:       omp.dispatch.inc:
2705 // CHECK1-NEXT:    br label [[OMP_DISPATCH_COND]]
2706 // CHECK1:       omp.dispatch.end:
2707 // CHECK1-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2708 // CHECK1-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
2709 // CHECK1-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2710 // CHECK1:       .omp.final.then:
2711 // CHECK1-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2712 // CHECK1-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP37]], 0
2713 // CHECK1-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
2714 // CHECK1-NEXT:    [[MUL16:%.*]] = mul nsw i32 [[DIV15]], 1
2715 // CHECK1-NEXT:    [[ADD17:%.*]] = add nsw i32 0, [[MUL16]]
2716 // CHECK1-NEXT:    store i32 [[ADD17]], i32* [[I6]], align 4
2717 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2718 // CHECK1:       .omp.final.done:
2719 // CHECK1-NEXT:    br label [[OMP_PRECOND_END]]
2720 // CHECK1:       omp.precond.end:
2721 // CHECK1-NEXT:    ret void
2722 //
2723 //
2724 // CHECK1-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
2725 // CHECK1-SAME: () #[[ATTR4:[0-9]+]] {
2726 // CHECK1-NEXT:  entry:
2727 // CHECK1-NEXT:    call void @__tgt_register_requires(i64 1)
2728 // CHECK1-NEXT:    ret void
2729 //
2730 //
2731 // CHECK2-LABEL: define {{[^@]+}}@main
2732 // CHECK2-SAME: () #[[ATTR0:[0-9]+]] {
2733 // CHECK2-NEXT:  entry:
2734 // CHECK2-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
2735 // CHECK2-NEXT:    [[A:%.*]] = alloca double*, align 8
2736 // CHECK2-NEXT:    [[B:%.*]] = alloca double*, align 8
2737 // CHECK2-NEXT:    [[C:%.*]] = alloca double*, align 8
2738 // CHECK2-NEXT:    [[N:%.*]] = alloca i32, align 4
2739 // CHECK2-NEXT:    [[CH:%.*]] = alloca i32, align 4
2740 // CHECK2-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 8
2741 // CHECK2-NEXT:    store i32 0, i32* [[RETVAL]], align 4
2742 // CHECK2-NEXT:    store i32 10000, i32* [[N]], align 4
2743 // CHECK2-NEXT:    store i32 100, i32* [[CH]], align 4
2744 // CHECK2-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 0
2745 // CHECK2-NEXT:    store i32* [[N]], i32** [[TMP0]], align 8
2746 // CHECK2-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 1
2747 // CHECK2-NEXT:    store double** [[A]], double*** [[TMP1]], align 8
2748 // CHECK2-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 2
2749 // CHECK2-NEXT:    store double** [[B]], double*** [[TMP2]], align 8
2750 // CHECK2-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 3
2751 // CHECK2-NEXT:    store double** [[C]], double*** [[TMP3]], align 8
2752 // CHECK2-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 4
2753 // CHECK2-NEXT:    store i32* [[CH]], i32** [[TMP4]], align 8
2754 // CHECK2-NEXT:    call void @"_ZZ4mainENK3$_0clEv"(%class.anon* nonnull align 8 dereferenceable(40) [[REF_TMP]])
2755 // CHECK2-NEXT:    ret i32 0
2756 //
2757 //
2758 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l116
2759 // CHECK2-SAME: (i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2:[0-9]+]] {
2760 // CHECK2-NEXT:  entry:
2761 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
2762 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
2763 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
2764 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
2765 // CHECK2-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
2766 // CHECK2-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
2767 // CHECK2-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
2768 // CHECK2-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
2769 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
2770 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[CONV]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
2771 // CHECK2-NEXT:    ret void
2772 //
2773 //
2774 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined.
2775 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
2776 // CHECK2-NEXT:  entry:
2777 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2778 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2779 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
2780 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
2781 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
2782 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
2783 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2784 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2785 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
2786 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
2787 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
2788 // CHECK2-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
2789 // CHECK2-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
2790 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2791 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2792 // CHECK2-NEXT:    [[I3:%.*]] = alloca i32, align 4
2793 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2794 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2795 // CHECK2-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
2796 // CHECK2-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
2797 // CHECK2-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
2798 // CHECK2-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
2799 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
2800 // CHECK2-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
2801 // CHECK2-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
2802 // CHECK2-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
2803 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
2804 // CHECK2-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
2805 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2806 // CHECK2-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
2807 // CHECK2-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
2808 // CHECK2-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
2809 // CHECK2-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
2810 // CHECK2-NEXT:    store i32 0, i32* [[I]], align 4
2811 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2812 // CHECK2-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
2813 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
2814 // CHECK2:       omp.precond.then:
2815 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
2816 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2817 // CHECK2-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
2818 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2819 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2820 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2821 // CHECK2-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
2822 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2823 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2824 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2825 // CHECK2-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
2826 // CHECK2-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2827 // CHECK2:       cond.true:
2828 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2829 // CHECK2-NEXT:    br label [[COND_END:%.*]]
2830 // CHECK2:       cond.false:
2831 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2832 // CHECK2-NEXT:    br label [[COND_END]]
2833 // CHECK2:       cond.end:
2834 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
2835 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
2836 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
2837 // CHECK2-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
2838 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2839 // CHECK2:       omp.inner.for.cond:
2840 // CHECK2-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
2841 // CHECK2-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !10
2842 // CHECK2-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
2843 // CHECK2-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2844 // CHECK2:       omp.inner.for.body:
2845 // CHECK2-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !10
2846 // CHECK2-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
2847 // CHECK2-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !10
2848 // CHECK2-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
2849 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !10
2850 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2851 // CHECK2:       omp.inner.for.inc:
2852 // CHECK2-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
2853 // CHECK2-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !10
2854 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
2855 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
2856 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]]
2857 // CHECK2:       omp.inner.for.end:
2858 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
2859 // CHECK2:       omp.loop.exit:
2860 // CHECK2-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2861 // CHECK2-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
2862 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
2863 // CHECK2-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2864 // CHECK2-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
2865 // CHECK2-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2866 // CHECK2:       .omp.final.then:
2867 // CHECK2-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2868 // CHECK2-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
2869 // CHECK2-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
2870 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
2871 // CHECK2-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
2872 // CHECK2-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
2873 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2874 // CHECK2:       .omp.final.done:
2875 // CHECK2-NEXT:    br label [[OMP_PRECOND_END]]
2876 // CHECK2:       omp.precond.end:
2877 // CHECK2-NEXT:    ret void
2878 //
2879 //
2880 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..1
2881 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
2882 // CHECK2-NEXT:  entry:
2883 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2884 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2885 // CHECK2-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
2886 // CHECK2-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
2887 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
2888 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
2889 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
2890 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
2891 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2892 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2893 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
2894 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
2895 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
2896 // CHECK2-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
2897 // CHECK2-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
2898 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2899 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2900 // CHECK2-NEXT:    [[I4:%.*]] = alloca i32, align 4
2901 // CHECK2-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8
2902 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2903 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2904 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
2905 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
2906 // CHECK2-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
2907 // CHECK2-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
2908 // CHECK2-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
2909 // CHECK2-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
2910 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
2911 // CHECK2-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
2912 // CHECK2-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
2913 // CHECK2-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
2914 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
2915 // CHECK2-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
2916 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2917 // CHECK2-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
2918 // CHECK2-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
2919 // CHECK2-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
2920 // CHECK2-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
2921 // CHECK2-NEXT:    store i32 0, i32* [[I]], align 4
2922 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2923 // CHECK2-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
2924 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
2925 // CHECK2:       omp.precond.then:
2926 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
2927 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2928 // CHECK2-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
2929 // CHECK2-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
2930 // CHECK2-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
2931 // CHECK2-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
2932 // CHECK2-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
2933 // CHECK2-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
2934 // CHECK2-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
2935 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2936 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2937 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2938 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
2939 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2940 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2941 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2942 // CHECK2-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
2943 // CHECK2-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2944 // CHECK2:       cond.true:
2945 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2946 // CHECK2-NEXT:    br label [[COND_END:%.*]]
2947 // CHECK2:       cond.false:
2948 // CHECK2-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2949 // CHECK2-NEXT:    br label [[COND_END]]
2950 // CHECK2:       cond.end:
2951 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
2952 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
2953 // CHECK2-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2954 // CHECK2-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
2955 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2956 // CHECK2:       omp.inner.for.cond:
2957 // CHECK2-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
2958 // CHECK2-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !14
2959 // CHECK2-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
2960 // CHECK2-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2961 // CHECK2:       omp.inner.for.body:
2962 // CHECK2-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
2963 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
2964 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2965 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !14
2966 // CHECK2-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !14
2967 // CHECK2-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !14
2968 // CHECK2-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
2969 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i64 [[IDXPROM]]
2970 // CHECK2-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !14
2971 // CHECK2-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !14
2972 // CHECK2-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !14
2973 // CHECK2-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
2974 // CHECK2-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM7]]
2975 // CHECK2-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX8]], align 8, !llvm.access.group !14
2976 // CHECK2-NEXT:    [[ADD9:%.*]] = fadd double [[TMP22]], [[TMP25]]
2977 // CHECK2-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !14
2978 // CHECK2-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !14
2979 // CHECK2-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
2980 // CHECK2-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM10]]
2981 // CHECK2-NEXT:    store double [[ADD9]], double* [[ARRAYIDX11]], align 8, !llvm.access.group !14
2982 // CHECK2-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0
2983 // CHECK2-NEXT:    store double** [[TMP1]], double*** [[TMP28]], align 8, !llvm.access.group !14
2984 // CHECK2-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 1
2985 // CHECK2-NEXT:    store i32* [[I4]], i32** [[TMP29]], align 8, !llvm.access.group !14
2986 // CHECK2-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 2
2987 // CHECK2-NEXT:    store double** [[TMP2]], double*** [[TMP30]], align 8, !llvm.access.group !14
2988 // CHECK2-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 3
2989 // CHECK2-NEXT:    store double** [[TMP3]], double*** [[TMP31]], align 8, !llvm.access.group !14
2990 // CHECK2-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(%class.anon.0* nonnull align 8 dereferenceable(32) [[REF_TMP]]), !llvm.access.group !14
2991 // CHECK2-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
2992 // CHECK2:       omp.body.continue:
2993 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2994 // CHECK2:       omp.inner.for.inc:
2995 // CHECK2-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
2996 // CHECK2-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP32]], 1
2997 // CHECK2-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
2998 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]]
2999 // CHECK2:       omp.inner.for.end:
3000 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
3001 // CHECK2:       omp.loop.exit:
3002 // CHECK2-NEXT:    [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
3003 // CHECK2-NEXT:    [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
3004 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
3005 // CHECK2-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3006 // CHECK2-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
3007 // CHECK2-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3008 // CHECK2:       .omp.final.then:
3009 // CHECK2-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3010 // CHECK2-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP37]], 0
3011 // CHECK2-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
3012 // CHECK2-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
3013 // CHECK2-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
3014 // CHECK2-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
3015 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
3016 // CHECK2:       .omp.final.done:
3017 // CHECK2-NEXT:    br label [[OMP_PRECOND_END]]
3018 // CHECK2:       omp.precond.end:
3019 // CHECK2-NEXT:    ret void
3020 //
3021 //
3022 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159
3023 // CHECK2-SAME: (i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2]] {
3024 // CHECK2-NEXT:  entry:
3025 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
3026 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
3027 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
3028 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
3029 // CHECK2-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
3030 // CHECK2-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
3031 // CHECK2-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
3032 // CHECK2-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
3033 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
3034 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32* [[CONV]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
3035 // CHECK2-NEXT:    ret void
3036 //
3037 //
3038 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..2
3039 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
3040 // CHECK2-NEXT:  entry:
3041 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
3042 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
3043 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
3044 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
3045 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
3046 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
3047 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3048 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3049 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
3050 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
3051 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
3052 // CHECK2-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
3053 // CHECK2-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
3054 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3055 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3056 // CHECK2-NEXT:    [[I3:%.*]] = alloca i32, align 4
3057 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
3058 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
3059 // CHECK2-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
3060 // CHECK2-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
3061 // CHECK2-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
3062 // CHECK2-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
3063 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
3064 // CHECK2-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
3065 // CHECK2-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
3066 // CHECK2-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
3067 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
3068 // CHECK2-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
3069 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3070 // CHECK2-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
3071 // CHECK2-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
3072 // CHECK2-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
3073 // CHECK2-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
3074 // CHECK2-NEXT:    store i32 0, i32* [[I]], align 4
3075 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3076 // CHECK2-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
3077 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
3078 // CHECK2:       omp.precond.then:
3079 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
3080 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3081 // CHECK2-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
3082 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3083 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3084 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
3085 // CHECK2-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
3086 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
3087 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3088 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3089 // CHECK2-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
3090 // CHECK2-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3091 // CHECK2:       cond.true:
3092 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3093 // CHECK2-NEXT:    br label [[COND_END:%.*]]
3094 // CHECK2:       cond.false:
3095 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3096 // CHECK2-NEXT:    br label [[COND_END]]
3097 // CHECK2:       cond.end:
3098 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
3099 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
3100 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
3101 // CHECK2-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
3102 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3103 // CHECK2:       omp.inner.for.cond:
3104 // CHECK2-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
3105 // CHECK2-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !19
3106 // CHECK2-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
3107 // CHECK2-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3108 // CHECK2:       omp.inner.for.body:
3109 // CHECK2-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !19
3110 // CHECK2-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
3111 // CHECK2-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !19
3112 // CHECK2-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
3113 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !19
3114 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3115 // CHECK2:       omp.inner.for.inc:
3116 // CHECK2-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
3117 // CHECK2-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !19
3118 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
3119 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
3120 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]]
3121 // CHECK2:       omp.inner.for.end:
3122 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
3123 // CHECK2:       omp.loop.exit:
3124 // CHECK2-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
3125 // CHECK2-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
3126 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
3127 // CHECK2-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3128 // CHECK2-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
3129 // CHECK2-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3130 // CHECK2:       .omp.final.then:
3131 // CHECK2-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3132 // CHECK2-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
3133 // CHECK2-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
3134 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
3135 // CHECK2-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
3136 // CHECK2-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
3137 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
3138 // CHECK2:       .omp.final.done:
3139 // CHECK2-NEXT:    br label [[OMP_PRECOND_END]]
3140 // CHECK2:       omp.precond.end:
3141 // CHECK2-NEXT:    ret void
3142 //
3143 //
3144 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..3
3145 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
3146 // CHECK2-NEXT:  entry:
3147 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
3148 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
3149 // CHECK2-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
3150 // CHECK2-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
3151 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
3152 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
3153 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
3154 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
3155 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3156 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3157 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
3158 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
3159 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
3160 // CHECK2-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
3161 // CHECK2-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
3162 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3163 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3164 // CHECK2-NEXT:    [[I4:%.*]] = alloca i32, align 4
3165 // CHECK2-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_1:%.*]], align 8
3166 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
3167 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
3168 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
3169 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
3170 // CHECK2-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
3171 // CHECK2-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
3172 // CHECK2-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
3173 // CHECK2-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
3174 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
3175 // CHECK2-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
3176 // CHECK2-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
3177 // CHECK2-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
3178 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
3179 // CHECK2-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
3180 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3181 // CHECK2-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
3182 // CHECK2-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
3183 // CHECK2-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
3184 // CHECK2-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
3185 // CHECK2-NEXT:    store i32 0, i32* [[I]], align 4
3186 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3187 // CHECK2-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
3188 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
3189 // CHECK2:       omp.precond.then:
3190 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
3191 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3192 // CHECK2-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
3193 // CHECK2-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
3194 // CHECK2-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
3195 // CHECK2-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
3196 // CHECK2-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
3197 // CHECK2-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
3198 // CHECK2-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
3199 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3200 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3201 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
3202 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
3203 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
3204 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3205 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3206 // CHECK2-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
3207 // CHECK2-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3208 // CHECK2:       cond.true:
3209 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3210 // CHECK2-NEXT:    br label [[COND_END:%.*]]
3211 // CHECK2:       cond.false:
3212 // CHECK2-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3213 // CHECK2-NEXT:    br label [[COND_END]]
3214 // CHECK2:       cond.end:
3215 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
3216 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
3217 // CHECK2-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
3218 // CHECK2-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
3219 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3220 // CHECK2:       omp.inner.for.cond:
3221 // CHECK2-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
3222 // CHECK2-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !22
3223 // CHECK2-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
3224 // CHECK2-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3225 // CHECK2:       omp.inner.for.body:
3226 // CHECK2-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
3227 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
3228 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
3229 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !22
3230 // CHECK2-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !22
3231 // CHECK2-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !22
3232 // CHECK2-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
3233 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i64 [[IDXPROM]]
3234 // CHECK2-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !22
3235 // CHECK2-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !22
3236 // CHECK2-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !22
3237 // CHECK2-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
3238 // CHECK2-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM7]]
3239 // CHECK2-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX8]], align 8, !llvm.access.group !22
3240 // CHECK2-NEXT:    [[ADD9:%.*]] = fadd double [[TMP22]], [[TMP25]]
3241 // CHECK2-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !22
3242 // CHECK2-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !22
3243 // CHECK2-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
3244 // CHECK2-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM10]]
3245 // CHECK2-NEXT:    store double [[ADD9]], double* [[ARRAYIDX11]], align 8, !llvm.access.group !22
3246 // CHECK2-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 0
3247 // CHECK2-NEXT:    store double** [[TMP1]], double*** [[TMP28]], align 8, !llvm.access.group !22
3248 // CHECK2-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 1
3249 // CHECK2-NEXT:    store i32* [[I4]], i32** [[TMP29]], align 8, !llvm.access.group !22
3250 // CHECK2-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 2
3251 // CHECK2-NEXT:    store double** [[TMP2]], double*** [[TMP30]], align 8, !llvm.access.group !22
3252 // CHECK2-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 3
3253 // CHECK2-NEXT:    store double** [[TMP3]], double*** [[TMP31]], align 8, !llvm.access.group !22
3254 // CHECK2-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE0_clEv"(%class.anon.1* nonnull align 8 dereferenceable(32) [[REF_TMP]]), !llvm.access.group !22
3255 // CHECK2-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
3256 // CHECK2:       omp.body.continue:
3257 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3258 // CHECK2:       omp.inner.for.inc:
3259 // CHECK2-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
3260 // CHECK2-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP32]], 1
3261 // CHECK2-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
3262 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]]
3263 // CHECK2:       omp.inner.for.end:
3264 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
3265 // CHECK2:       omp.loop.exit:
3266 // CHECK2-NEXT:    [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
3267 // CHECK2-NEXT:    [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
3268 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
3269 // CHECK2-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3270 // CHECK2-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
3271 // CHECK2-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3272 // CHECK2:       .omp.final.then:
3273 // CHECK2-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3274 // CHECK2-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP37]], 0
3275 // CHECK2-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
3276 // CHECK2-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
3277 // CHECK2-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
3278 // CHECK2-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
3279 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
3280 // CHECK2:       .omp.final.done:
3281 // CHECK2-NEXT:    br label [[OMP_PRECOND_END]]
3282 // CHECK2:       omp.precond.end:
3283 // CHECK2-NEXT:    ret void
3284 //
3285 //
3286 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l201
3287 // CHECK2-SAME: (i64 [[CH:%.*]], i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2]] {
3288 // CHECK2-NEXT:  entry:
3289 // CHECK2-NEXT:    [[CH_ADDR:%.*]] = alloca i64, align 8
3290 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
3291 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
3292 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
3293 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
3294 // CHECK2-NEXT:    store i64 [[CH]], i64* [[CH_ADDR]], align 8
3295 // CHECK2-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
3296 // CHECK2-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
3297 // CHECK2-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
3298 // CHECK2-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
3299 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
3300 // CHECK2-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
3301 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
3302 // CHECK2-NEXT:    ret void
3303 //
3304 //
3305 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..6
3306 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
3307 // CHECK2-NEXT:  entry:
3308 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
3309 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
3310 // CHECK2-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 8
3311 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
3312 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
3313 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
3314 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
3315 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3316 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3317 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
3318 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
3319 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
3320 // CHECK2-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
3321 // CHECK2-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
3322 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3323 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3324 // CHECK2-NEXT:    [[I3:%.*]] = alloca i32, align 4
3325 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
3326 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
3327 // CHECK2-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 8
3328 // CHECK2-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
3329 // CHECK2-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
3330 // CHECK2-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
3331 // CHECK2-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
3332 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
3333 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
3334 // CHECK2-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 8
3335 // CHECK2-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 8
3336 // CHECK2-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 8
3337 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP1]], align 4
3338 // CHECK2-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
3339 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3340 // CHECK2-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP6]], 0
3341 // CHECK2-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
3342 // CHECK2-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
3343 // CHECK2-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
3344 // CHECK2-NEXT:    store i32 0, i32* [[I]], align 4
3345 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3346 // CHECK2-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP7]]
3347 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
3348 // CHECK2:       omp.precond.then:
3349 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
3350 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3351 // CHECK2-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_COMB_UB]], align 4
3352 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3353 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3354 // CHECK2-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP0]], align 4
3355 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
3356 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
3357 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP9]])
3358 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3359 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3360 // CHECK2-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
3361 // CHECK2-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3362 // CHECK2:       cond.true:
3363 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3364 // CHECK2-NEXT:    br label [[COND_END:%.*]]
3365 // CHECK2:       cond.false:
3366 // CHECK2-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3367 // CHECK2-NEXT:    br label [[COND_END]]
3368 // CHECK2:       cond.end:
3369 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
3370 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
3371 // CHECK2-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
3372 // CHECK2-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
3373 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3374 // CHECK2:       omp.inner.for.cond:
3375 // CHECK2-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
3376 // CHECK2-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !25
3377 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], 1
3378 // CHECK2-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP17]], [[ADD]]
3379 // CHECK2-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3380 // CHECK2:       omp.inner.for.body:
3381 // CHECK2-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !25
3382 // CHECK2-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
3383 // CHECK2-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !25
3384 // CHECK2-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
3385 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]]), !llvm.access.group !25
3386 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3387 // CHECK2:       omp.inner.for.inc:
3388 // CHECK2-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
3389 // CHECK2-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !25
3390 // CHECK2-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
3391 // CHECK2-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
3392 // CHECK2-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !25
3393 // CHECK2-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !25
3394 // CHECK2-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
3395 // CHECK2-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !25
3396 // CHECK2-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !25
3397 // CHECK2-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !25
3398 // CHECK2-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP27]], [[TMP28]]
3399 // CHECK2-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !25
3400 // CHECK2-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !25
3401 // CHECK2-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !25
3402 // CHECK2-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[TMP29]], [[TMP30]]
3403 // CHECK2-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
3404 // CHECK2:       cond.true10:
3405 // CHECK2-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !25
3406 // CHECK2-NEXT:    br label [[COND_END12:%.*]]
3407 // CHECK2:       cond.false11:
3408 // CHECK2-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !25
3409 // CHECK2-NEXT:    br label [[COND_END12]]
3410 // CHECK2:       cond.end12:
3411 // CHECK2-NEXT:    [[COND13:%.*]] = phi i32 [ [[TMP31]], [[COND_TRUE10]] ], [ [[TMP32]], [[COND_FALSE11]] ]
3412 // CHECK2-NEXT:    store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !25
3413 // CHECK2-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !25
3414 // CHECK2-NEXT:    store i32 [[TMP33]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
3415 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
3416 // CHECK2:       omp.inner.for.end:
3417 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
3418 // CHECK2:       omp.loop.exit:
3419 // CHECK2-NEXT:    [[TMP34:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
3420 // CHECK2-NEXT:    [[TMP35:%.*]] = load i32, i32* [[TMP34]], align 4
3421 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP35]])
3422 // CHECK2-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3423 // CHECK2-NEXT:    [[TMP37:%.*]] = icmp ne i32 [[TMP36]], 0
3424 // CHECK2-NEXT:    br i1 [[TMP37]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3425 // CHECK2:       .omp.final.then:
3426 // CHECK2-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3427 // CHECK2-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP38]], 0
3428 // CHECK2-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
3429 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV15]], 1
3430 // CHECK2-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL]]
3431 // CHECK2-NEXT:    store i32 [[ADD16]], i32* [[I3]], align 4
3432 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
3433 // CHECK2:       .omp.final.done:
3434 // CHECK2-NEXT:    br label [[OMP_PRECOND_END]]
3435 // CHECK2:       omp.precond.end:
3436 // CHECK2-NEXT:    ret void
3437 //
3438 //
3439 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..7
3440 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
3441 // CHECK2-NEXT:  entry:
3442 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
3443 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
3444 // CHECK2-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
3445 // CHECK2-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
3446 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
3447 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
3448 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
3449 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
3450 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3451 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3452 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
3453 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
3454 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
3455 // CHECK2-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
3456 // CHECK2-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
3457 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3458 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3459 // CHECK2-NEXT:    [[I4:%.*]] = alloca i32, align 4
3460 // CHECK2-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_2:%.*]], align 8
3461 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
3462 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
3463 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
3464 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
3465 // CHECK2-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
3466 // CHECK2-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
3467 // CHECK2-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
3468 // CHECK2-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
3469 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
3470 // CHECK2-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
3471 // CHECK2-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
3472 // CHECK2-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
3473 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
3474 // CHECK2-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
3475 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3476 // CHECK2-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
3477 // CHECK2-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
3478 // CHECK2-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
3479 // CHECK2-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
3480 // CHECK2-NEXT:    store i32 0, i32* [[I]], align 4
3481 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3482 // CHECK2-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
3483 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
3484 // CHECK2:       omp.precond.then:
3485 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
3486 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3487 // CHECK2-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
3488 // CHECK2-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
3489 // CHECK2-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
3490 // CHECK2-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
3491 // CHECK2-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
3492 // CHECK2-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
3493 // CHECK2-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
3494 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3495 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3496 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
3497 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
3498 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
3499 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3500 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3501 // CHECK2-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
3502 // CHECK2-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3503 // CHECK2:       cond.true:
3504 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3505 // CHECK2-NEXT:    br label [[COND_END:%.*]]
3506 // CHECK2:       cond.false:
3507 // CHECK2-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3508 // CHECK2-NEXT:    br label [[COND_END]]
3509 // CHECK2:       cond.end:
3510 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
3511 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
3512 // CHECK2-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
3513 // CHECK2-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
3514 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3515 // CHECK2:       omp.inner.for.cond:
3516 // CHECK2-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
3517 // CHECK2-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !28
3518 // CHECK2-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
3519 // CHECK2-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3520 // CHECK2:       omp.inner.for.body:
3521 // CHECK2-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
3522 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
3523 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
3524 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !28
3525 // CHECK2-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !28
3526 // CHECK2-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !28
3527 // CHECK2-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
3528 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i64 [[IDXPROM]]
3529 // CHECK2-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !28
3530 // CHECK2-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !28
3531 // CHECK2-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !28
3532 // CHECK2-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
3533 // CHECK2-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM7]]
3534 // CHECK2-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX8]], align 8, !llvm.access.group !28
3535 // CHECK2-NEXT:    [[ADD9:%.*]] = fadd double [[TMP22]], [[TMP25]]
3536 // CHECK2-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !28
3537 // CHECK2-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !28
3538 // CHECK2-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
3539 // CHECK2-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM10]]
3540 // CHECK2-NEXT:    store double [[ADD9]], double* [[ARRAYIDX11]], align 8, !llvm.access.group !28
3541 // CHECK2-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[CLASS_ANON_2]], %class.anon.2* [[REF_TMP]], i32 0, i32 0
3542 // CHECK2-NEXT:    store double** [[TMP1]], double*** [[TMP28]], align 8, !llvm.access.group !28
3543 // CHECK2-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_2]], %class.anon.2* [[REF_TMP]], i32 0, i32 1
3544 // CHECK2-NEXT:    store i32* [[I4]], i32** [[TMP29]], align 8, !llvm.access.group !28
3545 // CHECK2-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_2]], %class.anon.2* [[REF_TMP]], i32 0, i32 2
3546 // CHECK2-NEXT:    store double** [[TMP2]], double*** [[TMP30]], align 8, !llvm.access.group !28
3547 // CHECK2-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_2]], %class.anon.2* [[REF_TMP]], i32 0, i32 3
3548 // CHECK2-NEXT:    store double** [[TMP3]], double*** [[TMP31]], align 8, !llvm.access.group !28
3549 // CHECK2-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE1_clEv"(%class.anon.2* nonnull align 8 dereferenceable(32) [[REF_TMP]]), !llvm.access.group !28
3550 // CHECK2-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
3551 // CHECK2:       omp.body.continue:
3552 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3553 // CHECK2:       omp.inner.for.inc:
3554 // CHECK2-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
3555 // CHECK2-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP32]], 1
3556 // CHECK2-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
3557 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]]
3558 // CHECK2:       omp.inner.for.end:
3559 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
3560 // CHECK2:       omp.loop.exit:
3561 // CHECK2-NEXT:    [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
3562 // CHECK2-NEXT:    [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
3563 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
3564 // CHECK2-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3565 // CHECK2-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
3566 // CHECK2-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3567 // CHECK2:       .omp.final.then:
3568 // CHECK2-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3569 // CHECK2-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP37]], 0
3570 // CHECK2-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
3571 // CHECK2-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
3572 // CHECK2-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
3573 // CHECK2-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
3574 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
3575 // CHECK2:       .omp.final.done:
3576 // CHECK2-NEXT:    br label [[OMP_PRECOND_END]]
3577 // CHECK2:       omp.precond.end:
3578 // CHECK2-NEXT:    ret void
3579 //
3580 //
3581 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l234
3582 // CHECK2-SAME: (i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2]] {
3583 // CHECK2-NEXT:  entry:
3584 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
3585 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
3586 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
3587 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
3588 // CHECK2-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
3589 // CHECK2-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
3590 // CHECK2-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
3591 // CHECK2-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
3592 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
3593 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32* [[CONV]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
3594 // CHECK2-NEXT:    ret void
3595 //
3596 //
3597 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..10
3598 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
3599 // CHECK2-NEXT:  entry:
3600 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
3601 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
3602 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
3603 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
3604 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
3605 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
3606 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3607 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3608 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
3609 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
3610 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
3611 // CHECK2-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
3612 // CHECK2-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
3613 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3614 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3615 // CHECK2-NEXT:    [[I3:%.*]] = alloca i32, align 4
3616 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
3617 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
3618 // CHECK2-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
3619 // CHECK2-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
3620 // CHECK2-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
3621 // CHECK2-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
3622 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
3623 // CHECK2-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
3624 // CHECK2-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
3625 // CHECK2-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
3626 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
3627 // CHECK2-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
3628 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3629 // CHECK2-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
3630 // CHECK2-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
3631 // CHECK2-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
3632 // CHECK2-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
3633 // CHECK2-NEXT:    store i32 0, i32* [[I]], align 4
3634 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3635 // CHECK2-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
3636 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
3637 // CHECK2:       omp.precond.then:
3638 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
3639 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3640 // CHECK2-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
3641 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3642 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3643 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
3644 // CHECK2-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
3645 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
3646 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3647 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3648 // CHECK2-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
3649 // CHECK2-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3650 // CHECK2:       cond.true:
3651 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3652 // CHECK2-NEXT:    br label [[COND_END:%.*]]
3653 // CHECK2:       cond.false:
3654 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3655 // CHECK2-NEXT:    br label [[COND_END]]
3656 // CHECK2:       cond.end:
3657 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
3658 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
3659 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
3660 // CHECK2-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
3661 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3662 // CHECK2:       omp.inner.for.cond:
3663 // CHECK2-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
3664 // CHECK2-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !31
3665 // CHECK2-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
3666 // CHECK2-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3667 // CHECK2:       omp.inner.for.body:
3668 // CHECK2-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !31
3669 // CHECK2-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
3670 // CHECK2-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !31
3671 // CHECK2-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
3672 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !31
3673 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3674 // CHECK2:       omp.inner.for.inc:
3675 // CHECK2-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
3676 // CHECK2-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !31
3677 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
3678 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
3679 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP32:![0-9]+]]
3680 // CHECK2:       omp.inner.for.end:
3681 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
3682 // CHECK2:       omp.loop.exit:
3683 // CHECK2-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
3684 // CHECK2-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
3685 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
3686 // CHECK2-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3687 // CHECK2-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
3688 // CHECK2-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3689 // CHECK2:       .omp.final.then:
3690 // CHECK2-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3691 // CHECK2-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
3692 // CHECK2-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
3693 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
3694 // CHECK2-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
3695 // CHECK2-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
3696 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
3697 // CHECK2:       .omp.final.done:
3698 // CHECK2-NEXT:    br label [[OMP_PRECOND_END]]
3699 // CHECK2:       omp.precond.end:
3700 // CHECK2-NEXT:    ret void
3701 //
3702 //
3703 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..11
3704 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
3705 // CHECK2-NEXT:  entry:
3706 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
3707 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
3708 // CHECK2-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
3709 // CHECK2-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
3710 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
3711 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
3712 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
3713 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
3714 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3715 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3716 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
3717 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
3718 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
3719 // CHECK2-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
3720 // CHECK2-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
3721 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3722 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3723 // CHECK2-NEXT:    [[I4:%.*]] = alloca i32, align 4
3724 // CHECK2-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_3:%.*]], align 8
3725 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
3726 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
3727 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
3728 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
3729 // CHECK2-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
3730 // CHECK2-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
3731 // CHECK2-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
3732 // CHECK2-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
3733 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
3734 // CHECK2-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
3735 // CHECK2-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
3736 // CHECK2-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
3737 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
3738 // CHECK2-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
3739 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3740 // CHECK2-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
3741 // CHECK2-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
3742 // CHECK2-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
3743 // CHECK2-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
3744 // CHECK2-NEXT:    store i32 0, i32* [[I]], align 4
3745 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3746 // CHECK2-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
3747 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
3748 // CHECK2:       omp.precond.then:
3749 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
3750 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3751 // CHECK2-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
3752 // CHECK2-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
3753 // CHECK2-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
3754 // CHECK2-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
3755 // CHECK2-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
3756 // CHECK2-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
3757 // CHECK2-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
3758 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3759 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3760 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
3761 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
3762 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
3763 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3764 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3765 // CHECK2-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
3766 // CHECK2-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3767 // CHECK2:       cond.true:
3768 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3769 // CHECK2-NEXT:    br label [[COND_END:%.*]]
3770 // CHECK2:       cond.false:
3771 // CHECK2-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3772 // CHECK2-NEXT:    br label [[COND_END]]
3773 // CHECK2:       cond.end:
3774 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
3775 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
3776 // CHECK2-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
3777 // CHECK2-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
3778 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3779 // CHECK2:       omp.inner.for.cond:
3780 // CHECK2-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
3781 // CHECK2-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !34
3782 // CHECK2-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
3783 // CHECK2-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3784 // CHECK2:       omp.inner.for.body:
3785 // CHECK2-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
3786 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
3787 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
3788 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !34
3789 // CHECK2-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !34
3790 // CHECK2-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !34
3791 // CHECK2-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
3792 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i64 [[IDXPROM]]
3793 // CHECK2-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !34
3794 // CHECK2-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !34
3795 // CHECK2-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !34
3796 // CHECK2-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
3797 // CHECK2-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM7]]
3798 // CHECK2-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX8]], align 8, !llvm.access.group !34
3799 // CHECK2-NEXT:    [[ADD9:%.*]] = fadd double [[TMP22]], [[TMP25]]
3800 // CHECK2-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !34
3801 // CHECK2-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !34
3802 // CHECK2-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
3803 // CHECK2-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM10]]
3804 // CHECK2-NEXT:    store double [[ADD9]], double* [[ARRAYIDX11]], align 8, !llvm.access.group !34
3805 // CHECK2-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[CLASS_ANON_3]], %class.anon.3* [[REF_TMP]], i32 0, i32 0
3806 // CHECK2-NEXT:    store double** [[TMP1]], double*** [[TMP28]], align 8, !llvm.access.group !34
3807 // CHECK2-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_3]], %class.anon.3* [[REF_TMP]], i32 0, i32 1
3808 // CHECK2-NEXT:    store i32* [[I4]], i32** [[TMP29]], align 8, !llvm.access.group !34
3809 // CHECK2-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_3]], %class.anon.3* [[REF_TMP]], i32 0, i32 2
3810 // CHECK2-NEXT:    store double** [[TMP2]], double*** [[TMP30]], align 8, !llvm.access.group !34
3811 // CHECK2-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_3]], %class.anon.3* [[REF_TMP]], i32 0, i32 3
3812 // CHECK2-NEXT:    store double** [[TMP3]], double*** [[TMP31]], align 8, !llvm.access.group !34
3813 // CHECK2-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE2_clEv"(%class.anon.3* nonnull align 8 dereferenceable(32) [[REF_TMP]]), !llvm.access.group !34
3814 // CHECK2-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
3815 // CHECK2:       omp.body.continue:
3816 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3817 // CHECK2:       omp.inner.for.inc:
3818 // CHECK2-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
3819 // CHECK2-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP32]], 1
3820 // CHECK2-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
3821 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP35:![0-9]+]]
3822 // CHECK2:       omp.inner.for.end:
3823 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
3824 // CHECK2:       omp.loop.exit:
3825 // CHECK2-NEXT:    [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
3826 // CHECK2-NEXT:    [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
3827 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
3828 // CHECK2-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3829 // CHECK2-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
3830 // CHECK2-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3831 // CHECK2:       .omp.final.then:
3832 // CHECK2-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3833 // CHECK2-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP37]], 0
3834 // CHECK2-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
3835 // CHECK2-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
3836 // CHECK2-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
3837 // CHECK2-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
3838 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
3839 // CHECK2:       .omp.final.done:
3840 // CHECK2-NEXT:    br label [[OMP_PRECOND_END]]
3841 // CHECK2:       omp.precond.end:
3842 // CHECK2-NEXT:    ret void
3843 //
3844 //
3845 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l266
3846 // CHECK2-SAME: (i64 [[CH:%.*]], i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2]] {
3847 // CHECK2-NEXT:  entry:
3848 // CHECK2-NEXT:    [[CH_ADDR:%.*]] = alloca i64, align 8
3849 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
3850 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
3851 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
3852 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
3853 // CHECK2-NEXT:    store i64 [[CH]], i64* [[CH_ADDR]], align 8
3854 // CHECK2-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
3855 // CHECK2-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
3856 // CHECK2-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
3857 // CHECK2-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
3858 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
3859 // CHECK2-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
3860 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
3861 // CHECK2-NEXT:    ret void
3862 //
3863 //
3864 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..14
3865 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
3866 // CHECK2-NEXT:  entry:
3867 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
3868 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
3869 // CHECK2-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 8
3870 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
3871 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
3872 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
3873 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
3874 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
3875 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3876 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3877 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
3878 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
3879 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
3880 // CHECK2-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
3881 // CHECK2-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
3882 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3883 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3884 // CHECK2-NEXT:    [[I4:%.*]] = alloca i32, align 4
3885 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
3886 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
3887 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
3888 // CHECK2-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 8
3889 // CHECK2-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
3890 // CHECK2-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
3891 // CHECK2-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
3892 // CHECK2-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
3893 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
3894 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
3895 // CHECK2-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 8
3896 // CHECK2-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 8
3897 // CHECK2-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 8
3898 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
3899 // CHECK2-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
3900 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
3901 // CHECK2-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
3902 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3903 // CHECK2-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
3904 // CHECK2-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
3905 // CHECK2-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
3906 // CHECK2-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
3907 // CHECK2-NEXT:    store i32 0, i32* [[I]], align 4
3908 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3909 // CHECK2-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
3910 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
3911 // CHECK2:       omp.precond.then:
3912 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
3913 // CHECK2-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
3914 // CHECK2-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
3915 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3916 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3917 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
3918 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
3919 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
3920 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3921 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
3922 // CHECK2-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
3923 // CHECK2-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3924 // CHECK2:       cond.true:
3925 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
3926 // CHECK2-NEXT:    br label [[COND_END:%.*]]
3927 // CHECK2:       cond.false:
3928 // CHECK2-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3929 // CHECK2-NEXT:    br label [[COND_END]]
3930 // CHECK2:       cond.end:
3931 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
3932 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
3933 // CHECK2-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
3934 // CHECK2-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
3935 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3936 // CHECK2:       omp.inner.for.cond:
3937 // CHECK2-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !37
3938 // CHECK2-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !37
3939 // CHECK2-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
3940 // CHECK2-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3941 // CHECK2:       omp.inner.for.body:
3942 // CHECK2-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !37
3943 // CHECK2-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
3944 // CHECK2-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !37
3945 // CHECK2-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
3946 // CHECK2-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !37
3947 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
3948 // CHECK2-NEXT:    store i32 [[TMP23]], i32* [[CONV]], align 4, !llvm.access.group !37
3949 // CHECK2-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !37
3950 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**, i64)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]], i64 [[TMP24]]), !llvm.access.group !37
3951 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3952 // CHECK2:       omp.inner.for.inc:
3953 // CHECK2-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !37
3954 // CHECK2-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !37
3955 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
3956 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !37
3957 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP38:![0-9]+]]
3958 // CHECK2:       omp.inner.for.end:
3959 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
3960 // CHECK2:       omp.loop.exit:
3961 // CHECK2-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
3962 // CHECK2-NEXT:    [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
3963 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP28]])
3964 // CHECK2-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3965 // CHECK2-NEXT:    [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
3966 // CHECK2-NEXT:    br i1 [[TMP30]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3967 // CHECK2:       .omp.final.then:
3968 // CHECK2-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3969 // CHECK2-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP31]], 0
3970 // CHECK2-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
3971 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
3972 // CHECK2-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
3973 // CHECK2-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
3974 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
3975 // CHECK2:       .omp.final.done:
3976 // CHECK2-NEXT:    br label [[OMP_PRECOND_END]]
3977 // CHECK2:       omp.precond.end:
3978 // CHECK2-NEXT:    ret void
3979 //
3980 //
3981 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..15
3982 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
3983 // CHECK2-NEXT:  entry:
3984 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
3985 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
3986 // CHECK2-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
3987 // CHECK2-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
3988 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
3989 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
3990 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
3991 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
3992 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
3993 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3994 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3995 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
3996 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
3997 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
3998 // CHECK2-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
3999 // CHECK2-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
4000 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4001 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4002 // CHECK2-NEXT:    [[I6:%.*]] = alloca i32, align 4
4003 // CHECK2-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_4:%.*]], align 8
4004 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
4005 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
4006 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
4007 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
4008 // CHECK2-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
4009 // CHECK2-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
4010 // CHECK2-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
4011 // CHECK2-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
4012 // CHECK2-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
4013 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
4014 // CHECK2-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
4015 // CHECK2-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
4016 // CHECK2-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
4017 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
4018 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
4019 // CHECK2-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
4020 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4021 // CHECK2-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
4022 // CHECK2-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
4023 // CHECK2-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
4024 // CHECK2-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
4025 // CHECK2-NEXT:    store i32 0, i32* [[I]], align 4
4026 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4027 // CHECK2-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
4028 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
4029 // CHECK2:       omp.precond.then:
4030 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
4031 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
4032 // CHECK2-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
4033 // CHECK2-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
4034 // CHECK2-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
4035 // CHECK2-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
4036 // CHECK2-NEXT:    [[CONV5:%.*]] = trunc i64 [[TMP9]] to i32
4037 // CHECK2-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_LB]], align 4
4038 // CHECK2-NEXT:    store i32 [[CONV5]], i32* [[DOTOMP_UB]], align 4
4039 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
4040 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
4041 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[CONV]], align 8
4042 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
4043 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
4044 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP12]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP10]])
4045 // CHECK2-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
4046 // CHECK2:       omp.dispatch.cond:
4047 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4048 // CHECK2-NEXT:    [[CONV7:%.*]] = sext i32 [[TMP13]] to i64
4049 // CHECK2-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
4050 // CHECK2-NEXT:    [[CMP8:%.*]] = icmp ugt i64 [[CONV7]], [[TMP14]]
4051 // CHECK2-NEXT:    br i1 [[CMP8]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4052 // CHECK2:       cond.true:
4053 // CHECK2-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
4054 // CHECK2-NEXT:    br label [[COND_END:%.*]]
4055 // CHECK2:       cond.false:
4056 // CHECK2-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4057 // CHECK2-NEXT:    [[CONV9:%.*]] = sext i32 [[TMP16]] to i64
4058 // CHECK2-NEXT:    br label [[COND_END]]
4059 // CHECK2:       cond.end:
4060 // CHECK2-NEXT:    [[COND:%.*]] = phi i64 [ [[TMP15]], [[COND_TRUE]] ], [ [[CONV9]], [[COND_FALSE]] ]
4061 // CHECK2-NEXT:    [[CONV10:%.*]] = trunc i64 [[COND]] to i32
4062 // CHECK2-NEXT:    store i32 [[CONV10]], i32* [[DOTOMP_UB]], align 4
4063 // CHECK2-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
4064 // CHECK2-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
4065 // CHECK2-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
4066 // CHECK2-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4067 // CHECK2-NEXT:    [[CMP11:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
4068 // CHECK2-NEXT:    br i1 [[CMP11]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
4069 // CHECK2:       omp.dispatch.body:
4070 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
4071 // CHECK2:       omp.inner.for.cond:
4072 // CHECK2-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40
4073 // CHECK2-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !40
4074 // CHECK2-NEXT:    [[CMP12:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
4075 // CHECK2-NEXT:    br i1 [[CMP12]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4076 // CHECK2:       omp.inner.for.body:
4077 // CHECK2-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40
4078 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP22]], 1
4079 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
4080 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[I6]], align 4, !llvm.access.group !40
4081 // CHECK2-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !40
4082 // CHECK2-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !40
4083 // CHECK2-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP24]] to i64
4084 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM]]
4085 // CHECK2-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !40
4086 // CHECK2-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !40
4087 // CHECK2-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !40
4088 // CHECK2-NEXT:    [[IDXPROM13:%.*]] = sext i32 [[TMP27]] to i64
4089 // CHECK2-NEXT:    [[ARRAYIDX14:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM13]]
4090 // CHECK2-NEXT:    [[TMP28:%.*]] = load double, double* [[ARRAYIDX14]], align 8, !llvm.access.group !40
4091 // CHECK2-NEXT:    [[ADD15:%.*]] = fadd double [[TMP25]], [[TMP28]]
4092 // CHECK2-NEXT:    [[TMP29:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !40
4093 // CHECK2-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !40
4094 // CHECK2-NEXT:    [[IDXPROM16:%.*]] = sext i32 [[TMP30]] to i64
4095 // CHECK2-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds double, double* [[TMP29]], i64 [[IDXPROM16]]
4096 // CHECK2-NEXT:    store double [[ADD15]], double* [[ARRAYIDX17]], align 8, !llvm.access.group !40
4097 // CHECK2-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_4]], %class.anon.4* [[REF_TMP]], i32 0, i32 0
4098 // CHECK2-NEXT:    store double** [[TMP1]], double*** [[TMP31]], align 8, !llvm.access.group !40
4099 // CHECK2-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[CLASS_ANON_4]], %class.anon.4* [[REF_TMP]], i32 0, i32 1
4100 // CHECK2-NEXT:    store i32* [[I6]], i32** [[TMP32]], align 8, !llvm.access.group !40
4101 // CHECK2-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [[CLASS_ANON_4]], %class.anon.4* [[REF_TMP]], i32 0, i32 2
4102 // CHECK2-NEXT:    store double** [[TMP2]], double*** [[TMP33]], align 8, !llvm.access.group !40
4103 // CHECK2-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [[CLASS_ANON_4]], %class.anon.4* [[REF_TMP]], i32 0, i32 3
4104 // CHECK2-NEXT:    store double** [[TMP3]], double*** [[TMP34]], align 8, !llvm.access.group !40
4105 // CHECK2-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE3_clEv"(%class.anon.4* nonnull align 8 dereferenceable(32) [[REF_TMP]]), !llvm.access.group !40
4106 // CHECK2-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
4107 // CHECK2:       omp.body.continue:
4108 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
4109 // CHECK2:       omp.inner.for.inc:
4110 // CHECK2-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40
4111 // CHECK2-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP35]], 1
4112 // CHECK2-NEXT:    store i32 [[ADD18]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40
4113 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP41:![0-9]+]]
4114 // CHECK2:       omp.inner.for.end:
4115 // CHECK2-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
4116 // CHECK2:       omp.dispatch.inc:
4117 // CHECK2-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
4118 // CHECK2-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
4119 // CHECK2-NEXT:    [[ADD19:%.*]] = add nsw i32 [[TMP36]], [[TMP37]]
4120 // CHECK2-NEXT:    store i32 [[ADD19]], i32* [[DOTOMP_LB]], align 4
4121 // CHECK2-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4122 // CHECK2-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
4123 // CHECK2-NEXT:    [[ADD20:%.*]] = add nsw i32 [[TMP38]], [[TMP39]]
4124 // CHECK2-NEXT:    store i32 [[ADD20]], i32* [[DOTOMP_UB]], align 4
4125 // CHECK2-NEXT:    br label [[OMP_DISPATCH_COND]]
4126 // CHECK2:       omp.dispatch.end:
4127 // CHECK2-NEXT:    [[TMP40:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
4128 // CHECK2-NEXT:    [[TMP41:%.*]] = load i32, i32* [[TMP40]], align 4
4129 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP41]])
4130 // CHECK2-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4131 // CHECK2-NEXT:    [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0
4132 // CHECK2-NEXT:    br i1 [[TMP43]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4133 // CHECK2:       .omp.final.then:
4134 // CHECK2-NEXT:    [[TMP44:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4135 // CHECK2-NEXT:    [[SUB21:%.*]] = sub nsw i32 [[TMP44]], 0
4136 // CHECK2-NEXT:    [[DIV22:%.*]] = sdiv i32 [[SUB21]], 1
4137 // CHECK2-NEXT:    [[MUL23:%.*]] = mul nsw i32 [[DIV22]], 1
4138 // CHECK2-NEXT:    [[ADD24:%.*]] = add nsw i32 0, [[MUL23]]
4139 // CHECK2-NEXT:    store i32 [[ADD24]], i32* [[I6]], align 4
4140 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
4141 // CHECK2:       .omp.final.done:
4142 // CHECK2-NEXT:    br label [[OMP_PRECOND_END]]
4143 // CHECK2:       omp.precond.end:
4144 // CHECK2-NEXT:    ret void
4145 //
4146 //
4147 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l299
4148 // CHECK2-SAME: (i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2]] {
4149 // CHECK2-NEXT:  entry:
4150 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
4151 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
4152 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
4153 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
4154 // CHECK2-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
4155 // CHECK2-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
4156 // CHECK2-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
4157 // CHECK2-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
4158 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
4159 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32* [[CONV]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
4160 // CHECK2-NEXT:    ret void
4161 //
4162 //
4163 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..18
4164 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
4165 // CHECK2-NEXT:  entry:
4166 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
4167 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
4168 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
4169 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
4170 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
4171 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
4172 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
4173 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
4174 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
4175 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
4176 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
4177 // CHECK2-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
4178 // CHECK2-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
4179 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4180 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4181 // CHECK2-NEXT:    [[I3:%.*]] = alloca i32, align 4
4182 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
4183 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
4184 // CHECK2-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
4185 // CHECK2-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
4186 // CHECK2-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
4187 // CHECK2-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
4188 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
4189 // CHECK2-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
4190 // CHECK2-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
4191 // CHECK2-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
4192 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
4193 // CHECK2-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
4194 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4195 // CHECK2-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
4196 // CHECK2-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
4197 // CHECK2-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
4198 // CHECK2-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
4199 // CHECK2-NEXT:    store i32 0, i32* [[I]], align 4
4200 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4201 // CHECK2-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
4202 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
4203 // CHECK2:       omp.precond.then:
4204 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
4205 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4206 // CHECK2-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
4207 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
4208 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
4209 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
4210 // CHECK2-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
4211 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
4212 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
4213 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4214 // CHECK2-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
4215 // CHECK2-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4216 // CHECK2:       cond.true:
4217 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4218 // CHECK2-NEXT:    br label [[COND_END:%.*]]
4219 // CHECK2:       cond.false:
4220 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
4221 // CHECK2-NEXT:    br label [[COND_END]]
4222 // CHECK2:       cond.end:
4223 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
4224 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
4225 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
4226 // CHECK2-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
4227 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
4228 // CHECK2:       omp.inner.for.cond:
4229 // CHECK2-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43
4230 // CHECK2-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !43
4231 // CHECK2-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
4232 // CHECK2-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4233 // CHECK2:       omp.inner.for.body:
4234 // CHECK2-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !43
4235 // CHECK2-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
4236 // CHECK2-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !43
4237 // CHECK2-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
4238 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !43
4239 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
4240 // CHECK2:       omp.inner.for.inc:
4241 // CHECK2-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43
4242 // CHECK2-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !43
4243 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
4244 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43
4245 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP44:![0-9]+]]
4246 // CHECK2:       omp.inner.for.end:
4247 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
4248 // CHECK2:       omp.loop.exit:
4249 // CHECK2-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
4250 // CHECK2-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
4251 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
4252 // CHECK2-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4253 // CHECK2-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
4254 // CHECK2-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4255 // CHECK2:       .omp.final.then:
4256 // CHECK2-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4257 // CHECK2-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
4258 // CHECK2-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
4259 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
4260 // CHECK2-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
4261 // CHECK2-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
4262 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
4263 // CHECK2:       .omp.final.done:
4264 // CHECK2-NEXT:    br label [[OMP_PRECOND_END]]
4265 // CHECK2:       omp.precond.end:
4266 // CHECK2-NEXT:    ret void
4267 //
4268 //
4269 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..19
4270 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
4271 // CHECK2-NEXT:  entry:
4272 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
4273 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
4274 // CHECK2-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
4275 // CHECK2-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
4276 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
4277 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
4278 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
4279 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
4280 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
4281 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
4282 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
4283 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
4284 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
4285 // CHECK2-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
4286 // CHECK2-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
4287 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4288 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4289 // CHECK2-NEXT:    [[I4:%.*]] = alloca i32, align 4
4290 // CHECK2-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_5:%.*]], align 8
4291 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
4292 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
4293 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
4294 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
4295 // CHECK2-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
4296 // CHECK2-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
4297 // CHECK2-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
4298 // CHECK2-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
4299 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
4300 // CHECK2-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
4301 // CHECK2-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
4302 // CHECK2-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
4303 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
4304 // CHECK2-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
4305 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4306 // CHECK2-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
4307 // CHECK2-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
4308 // CHECK2-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
4309 // CHECK2-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
4310 // CHECK2-NEXT:    store i32 0, i32* [[I]], align 4
4311 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4312 // CHECK2-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
4313 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
4314 // CHECK2:       omp.precond.then:
4315 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
4316 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4317 // CHECK2-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
4318 // CHECK2-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
4319 // CHECK2-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
4320 // CHECK2-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
4321 // CHECK2-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
4322 // CHECK2-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
4323 // CHECK2-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
4324 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
4325 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
4326 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
4327 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4328 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
4329 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
4330 // CHECK2-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32 35, i32 [[TMP10]], i32 [[TMP11]], i32 1, i32 1)
4331 // CHECK2-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
4332 // CHECK2:       omp.dispatch.cond:
4333 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
4334 // CHECK2-NEXT:    [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4
4335 // CHECK2-NEXT:    [[TMP16:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP15]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
4336 // CHECK2-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP16]], 0
4337 // CHECK2-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
4338 // CHECK2:       omp.dispatch.body:
4339 // CHECK2-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
4340 // CHECK2-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
4341 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
4342 // CHECK2:       omp.inner.for.cond:
4343 // CHECK2-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !46
4344 // CHECK2-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !46
4345 // CHECK2-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
4346 // CHECK2-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4347 // CHECK2:       omp.inner.for.body:
4348 // CHECK2-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !46
4349 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP20]], 1
4350 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
4351 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !46
4352 // CHECK2-NEXT:    [[TMP21:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !46
4353 // CHECK2-NEXT:    [[TMP22:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !46
4354 // CHECK2-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP22]] to i64
4355 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP21]], i64 [[IDXPROM]]
4356 // CHECK2-NEXT:    [[TMP23:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !46
4357 // CHECK2-NEXT:    [[TMP24:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !46
4358 // CHECK2-NEXT:    [[TMP25:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !46
4359 // CHECK2-NEXT:    [[IDXPROM6:%.*]] = sext i32 [[TMP25]] to i64
4360 // CHECK2-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds double, double* [[TMP24]], i64 [[IDXPROM6]]
4361 // CHECK2-NEXT:    [[TMP26:%.*]] = load double, double* [[ARRAYIDX7]], align 8, !llvm.access.group !46
4362 // CHECK2-NEXT:    [[ADD8:%.*]] = fadd double [[TMP23]], [[TMP26]]
4363 // CHECK2-NEXT:    [[TMP27:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !46
4364 // CHECK2-NEXT:    [[TMP28:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !46
4365 // CHECK2-NEXT:    [[IDXPROM9:%.*]] = sext i32 [[TMP28]] to i64
4366 // CHECK2-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds double, double* [[TMP27]], i64 [[IDXPROM9]]
4367 // CHECK2-NEXT:    store double [[ADD8]], double* [[ARRAYIDX10]], align 8, !llvm.access.group !46
4368 // CHECK2-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_5]], %class.anon.5* [[REF_TMP]], i32 0, i32 0
4369 // CHECK2-NEXT:    store double** [[TMP1]], double*** [[TMP29]], align 8, !llvm.access.group !46
4370 // CHECK2-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_5]], %class.anon.5* [[REF_TMP]], i32 0, i32 1
4371 // CHECK2-NEXT:    store i32* [[I4]], i32** [[TMP30]], align 8, !llvm.access.group !46
4372 // CHECK2-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_5]], %class.anon.5* [[REF_TMP]], i32 0, i32 2
4373 // CHECK2-NEXT:    store double** [[TMP2]], double*** [[TMP31]], align 8, !llvm.access.group !46
4374 // CHECK2-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[CLASS_ANON_5]], %class.anon.5* [[REF_TMP]], i32 0, i32 3
4375 // CHECK2-NEXT:    store double** [[TMP3]], double*** [[TMP32]], align 8, !llvm.access.group !46
4376 // CHECK2-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE4_clEv"(%class.anon.5* nonnull align 8 dereferenceable(32) [[REF_TMP]]), !llvm.access.group !46
4377 // CHECK2-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
4378 // CHECK2:       omp.body.continue:
4379 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
4380 // CHECK2:       omp.inner.for.inc:
4381 // CHECK2-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !46
4382 // CHECK2-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP33]], 1
4383 // CHECK2-NEXT:    store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !46
4384 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP47:![0-9]+]]
4385 // CHECK2:       omp.inner.for.end:
4386 // CHECK2-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
4387 // CHECK2:       omp.dispatch.inc:
4388 // CHECK2-NEXT:    br label [[OMP_DISPATCH_COND]]
4389 // CHECK2:       omp.dispatch.end:
4390 // CHECK2-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4391 // CHECK2-NEXT:    [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
4392 // CHECK2-NEXT:    br i1 [[TMP35]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4393 // CHECK2:       .omp.final.then:
4394 // CHECK2-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4395 // CHECK2-NEXT:    [[SUB12:%.*]] = sub nsw i32 [[TMP36]], 0
4396 // CHECK2-NEXT:    [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1
4397 // CHECK2-NEXT:    [[MUL14:%.*]] = mul nsw i32 [[DIV13]], 1
4398 // CHECK2-NEXT:    [[ADD15:%.*]] = add nsw i32 0, [[MUL14]]
4399 // CHECK2-NEXT:    store i32 [[ADD15]], i32* [[I4]], align 4
4400 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
4401 // CHECK2:       .omp.final.done:
4402 // CHECK2-NEXT:    br label [[OMP_PRECOND_END]]
4403 // CHECK2:       omp.precond.end:
4404 // CHECK2-NEXT:    ret void
4405 //
4406 //
4407 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l328
4408 // CHECK2-SAME: (i64 [[CH:%.*]], i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2]] {
4409 // CHECK2-NEXT:  entry:
4410 // CHECK2-NEXT:    [[CH_ADDR:%.*]] = alloca i64, align 8
4411 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
4412 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
4413 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
4414 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
4415 // CHECK2-NEXT:    store i64 [[CH]], i64* [[CH_ADDR]], align 8
4416 // CHECK2-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
4417 // CHECK2-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
4418 // CHECK2-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
4419 // CHECK2-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
4420 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
4421 // CHECK2-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
4422 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
4423 // CHECK2-NEXT:    ret void
4424 //
4425 //
4426 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..22
4427 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
4428 // CHECK2-NEXT:  entry:
4429 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
4430 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
4431 // CHECK2-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 8
4432 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
4433 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
4434 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
4435 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
4436 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
4437 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
4438 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
4439 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
4440 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
4441 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
4442 // CHECK2-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
4443 // CHECK2-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
4444 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4445 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4446 // CHECK2-NEXT:    [[I4:%.*]] = alloca i32, align 4
4447 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
4448 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
4449 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
4450 // CHECK2-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 8
4451 // CHECK2-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
4452 // CHECK2-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
4453 // CHECK2-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
4454 // CHECK2-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
4455 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
4456 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
4457 // CHECK2-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 8
4458 // CHECK2-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 8
4459 // CHECK2-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 8
4460 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
4461 // CHECK2-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
4462 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
4463 // CHECK2-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
4464 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4465 // CHECK2-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
4466 // CHECK2-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
4467 // CHECK2-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
4468 // CHECK2-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
4469 // CHECK2-NEXT:    store i32 0, i32* [[I]], align 4
4470 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4471 // CHECK2-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
4472 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
4473 // CHECK2:       omp.precond.then:
4474 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
4475 // CHECK2-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
4476 // CHECK2-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
4477 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
4478 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
4479 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
4480 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
4481 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
4482 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
4483 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
4484 // CHECK2-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
4485 // CHECK2-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4486 // CHECK2:       cond.true:
4487 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
4488 // CHECK2-NEXT:    br label [[COND_END:%.*]]
4489 // CHECK2:       cond.false:
4490 // CHECK2-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
4491 // CHECK2-NEXT:    br label [[COND_END]]
4492 // CHECK2:       cond.end:
4493 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
4494 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
4495 // CHECK2-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
4496 // CHECK2-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
4497 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
4498 // CHECK2:       omp.inner.for.cond:
4499 // CHECK2-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !49
4500 // CHECK2-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !49
4501 // CHECK2-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
4502 // CHECK2-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4503 // CHECK2:       omp.inner.for.body:
4504 // CHECK2-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !49
4505 // CHECK2-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
4506 // CHECK2-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !49
4507 // CHECK2-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
4508 // CHECK2-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !49
4509 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
4510 // CHECK2-NEXT:    store i32 [[TMP23]], i32* [[CONV]], align 4, !llvm.access.group !49
4511 // CHECK2-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !49
4512 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**, i64)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]], i64 [[TMP24]]), !llvm.access.group !49
4513 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
4514 // CHECK2:       omp.inner.for.inc:
4515 // CHECK2-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !49
4516 // CHECK2-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !49
4517 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
4518 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !49
4519 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP50:![0-9]+]]
4520 // CHECK2:       omp.inner.for.end:
4521 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
4522 // CHECK2:       omp.loop.exit:
4523 // CHECK2-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
4524 // CHECK2-NEXT:    [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
4525 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP28]])
4526 // CHECK2-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4527 // CHECK2-NEXT:    [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
4528 // CHECK2-NEXT:    br i1 [[TMP30]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4529 // CHECK2:       .omp.final.then:
4530 // CHECK2-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4531 // CHECK2-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP31]], 0
4532 // CHECK2-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
4533 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
4534 // CHECK2-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
4535 // CHECK2-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
4536 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
4537 // CHECK2:       .omp.final.done:
4538 // CHECK2-NEXT:    br label [[OMP_PRECOND_END]]
4539 // CHECK2:       omp.precond.end:
4540 // CHECK2-NEXT:    ret void
4541 //
4542 //
4543 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..23
4544 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
4545 // CHECK2-NEXT:  entry:
4546 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
4547 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
4548 // CHECK2-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
4549 // CHECK2-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
4550 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
4551 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
4552 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
4553 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
4554 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
4555 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
4556 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
4557 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
4558 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
4559 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
4560 // CHECK2-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
4561 // CHECK2-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
4562 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4563 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4564 // CHECK2-NEXT:    [[I6:%.*]] = alloca i32, align 4
4565 // CHECK2-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_6:%.*]], align 8
4566 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
4567 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
4568 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
4569 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
4570 // CHECK2-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
4571 // CHECK2-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
4572 // CHECK2-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
4573 // CHECK2-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
4574 // CHECK2-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
4575 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
4576 // CHECK2-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
4577 // CHECK2-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
4578 // CHECK2-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
4579 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
4580 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
4581 // CHECK2-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
4582 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4583 // CHECK2-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
4584 // CHECK2-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
4585 // CHECK2-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
4586 // CHECK2-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
4587 // CHECK2-NEXT:    store i32 0, i32* [[I]], align 4
4588 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4589 // CHECK2-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
4590 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
4591 // CHECK2:       omp.precond.then:
4592 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
4593 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
4594 // CHECK2-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
4595 // CHECK2-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
4596 // CHECK2-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
4597 // CHECK2-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
4598 // CHECK2-NEXT:    [[CONV5:%.*]] = trunc i64 [[TMP9]] to i32
4599 // CHECK2-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_LB]], align 4
4600 // CHECK2-NEXT:    store i32 [[CONV5]], i32* [[DOTOMP_UB]], align 4
4601 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
4602 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
4603 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[CONV]], align 8
4604 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
4605 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4606 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
4607 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
4608 // CHECK2-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32 35, i32 [[TMP11]], i32 [[TMP12]], i32 1, i32 [[TMP10]])
4609 // CHECK2-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
4610 // CHECK2:       omp.dispatch.cond:
4611 // CHECK2-NEXT:    [[TMP15:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
4612 // CHECK2-NEXT:    [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4
4613 // CHECK2-NEXT:    [[TMP17:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP16]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
4614 // CHECK2-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP17]], 0
4615 // CHECK2-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
4616 // CHECK2:       omp.dispatch.body:
4617 // CHECK2-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
4618 // CHECK2-NEXT:    store i32 [[TMP18]], i32* [[DOTOMP_IV]], align 4
4619 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
4620 // CHECK2:       omp.inner.for.cond:
4621 // CHECK2-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !52
4622 // CHECK2-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !52
4623 // CHECK2-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
4624 // CHECK2-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4625 // CHECK2:       omp.inner.for.body:
4626 // CHECK2-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !52
4627 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP21]], 1
4628 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
4629 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[I6]], align 4, !llvm.access.group !52
4630 // CHECK2-NEXT:    [[TMP22:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !52
4631 // CHECK2-NEXT:    [[TMP23:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !52
4632 // CHECK2-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP23]] to i64
4633 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP22]], i64 [[IDXPROM]]
4634 // CHECK2-NEXT:    [[TMP24:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !52
4635 // CHECK2-NEXT:    [[TMP25:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !52
4636 // CHECK2-NEXT:    [[TMP26:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !52
4637 // CHECK2-NEXT:    [[IDXPROM8:%.*]] = sext i32 [[TMP26]] to i64
4638 // CHECK2-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds double, double* [[TMP25]], i64 [[IDXPROM8]]
4639 // CHECK2-NEXT:    [[TMP27:%.*]] = load double, double* [[ARRAYIDX9]], align 8, !llvm.access.group !52
4640 // CHECK2-NEXT:    [[ADD10:%.*]] = fadd double [[TMP24]], [[TMP27]]
4641 // CHECK2-NEXT:    [[TMP28:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !52
4642 // CHECK2-NEXT:    [[TMP29:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !52
4643 // CHECK2-NEXT:    [[IDXPROM11:%.*]] = sext i32 [[TMP29]] to i64
4644 // CHECK2-NEXT:    [[ARRAYIDX12:%.*]] = getelementptr inbounds double, double* [[TMP28]], i64 [[IDXPROM11]]
4645 // CHECK2-NEXT:    store double [[ADD10]], double* [[ARRAYIDX12]], align 8, !llvm.access.group !52
4646 // CHECK2-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_6]], %class.anon.6* [[REF_TMP]], i32 0, i32 0
4647 // CHECK2-NEXT:    store double** [[TMP1]], double*** [[TMP30]], align 8, !llvm.access.group !52
4648 // CHECK2-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_6]], %class.anon.6* [[REF_TMP]], i32 0, i32 1
4649 // CHECK2-NEXT:    store i32* [[I6]], i32** [[TMP31]], align 8, !llvm.access.group !52
4650 // CHECK2-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[CLASS_ANON_6]], %class.anon.6* [[REF_TMP]], i32 0, i32 2
4651 // CHECK2-NEXT:    store double** [[TMP2]], double*** [[TMP32]], align 8, !llvm.access.group !52
4652 // CHECK2-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [[CLASS_ANON_6]], %class.anon.6* [[REF_TMP]], i32 0, i32 3
4653 // CHECK2-NEXT:    store double** [[TMP3]], double*** [[TMP33]], align 8, !llvm.access.group !52
4654 // CHECK2-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE5_clEv"(%class.anon.6* nonnull align 8 dereferenceable(32) [[REF_TMP]]), !llvm.access.group !52
4655 // CHECK2-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
4656 // CHECK2:       omp.body.continue:
4657 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
4658 // CHECK2:       omp.inner.for.inc:
4659 // CHECK2-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !52
4660 // CHECK2-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP34]], 1
4661 // CHECK2-NEXT:    store i32 [[ADD13]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !52
4662 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP53:![0-9]+]]
4663 // CHECK2:       omp.inner.for.end:
4664 // CHECK2-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
4665 // CHECK2:       omp.dispatch.inc:
4666 // CHECK2-NEXT:    br label [[OMP_DISPATCH_COND]]
4667 // CHECK2:       omp.dispatch.end:
4668 // CHECK2-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4669 // CHECK2-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
4670 // CHECK2-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4671 // CHECK2:       .omp.final.then:
4672 // CHECK2-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4673 // CHECK2-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP37]], 0
4674 // CHECK2-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
4675 // CHECK2-NEXT:    [[MUL16:%.*]] = mul nsw i32 [[DIV15]], 1
4676 // CHECK2-NEXT:    [[ADD17:%.*]] = add nsw i32 0, [[MUL16]]
4677 // CHECK2-NEXT:    store i32 [[ADD17]], i32* [[I6]], align 4
4678 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
4679 // CHECK2:       .omp.final.done:
4680 // CHECK2-NEXT:    br label [[OMP_PRECOND_END]]
4681 // CHECK2:       omp.precond.end:
4682 // CHECK2-NEXT:    ret void
4683 //
4684 //
4685 // CHECK2-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
4686 // CHECK2-SAME: () #[[ATTR4:[0-9]+]] {
4687 // CHECK2-NEXT:  entry:
4688 // CHECK2-NEXT:    call void @__tgt_register_requires(i64 1)
4689 // CHECK2-NEXT:    ret void
4690 //
4691 //
4692 // CHECK3-LABEL: define {{[^@]+}}@main
4693 // CHECK3-SAME: () #[[ATTR0:[0-9]+]] {
4694 // CHECK3-NEXT:  entry:
4695 // CHECK3-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
4696 // CHECK3-NEXT:    [[A:%.*]] = alloca double*, align 4
4697 // CHECK3-NEXT:    [[B:%.*]] = alloca double*, align 4
4698 // CHECK3-NEXT:    [[C:%.*]] = alloca double*, align 4
4699 // CHECK3-NEXT:    [[N:%.*]] = alloca i32, align 4
4700 // CHECK3-NEXT:    [[CH:%.*]] = alloca i32, align 4
4701 // CHECK3-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 4
4702 // CHECK3-NEXT:    store i32 0, i32* [[RETVAL]], align 4
4703 // CHECK3-NEXT:    store i32 10000, i32* [[N]], align 4
4704 // CHECK3-NEXT:    store i32 100, i32* [[CH]], align 4
4705 // CHECK3-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 0
4706 // CHECK3-NEXT:    store i32* [[N]], i32** [[TMP0]], align 4
4707 // CHECK3-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 1
4708 // CHECK3-NEXT:    store double** [[A]], double*** [[TMP1]], align 4
4709 // CHECK3-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 2
4710 // CHECK3-NEXT:    store double** [[B]], double*** [[TMP2]], align 4
4711 // CHECK3-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 3
4712 // CHECK3-NEXT:    store double** [[C]], double*** [[TMP3]], align 4
4713 // CHECK3-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 4
4714 // CHECK3-NEXT:    store i32* [[CH]], i32** [[TMP4]], align 4
4715 // CHECK3-NEXT:    call void @"_ZZ4mainENK3$_0clEv"(%class.anon* nonnull align 4 dereferenceable(20) [[REF_TMP]])
4716 // CHECK3-NEXT:    ret i32 0
4717 //
4718 //
4719 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l116
4720 // CHECK3-SAME: (i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2:[0-9]+]] {
4721 // CHECK3-NEXT:  entry:
4722 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
4723 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
4724 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
4725 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
4726 // CHECK3-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
4727 // CHECK3-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
4728 // CHECK3-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
4729 // CHECK3-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
4730 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
4731 // CHECK3-NEXT:    ret void
4732 //
4733 //
4734 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined.
4735 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
4736 // CHECK3-NEXT:  entry:
4737 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
4738 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
4739 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
4740 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
4741 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
4742 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
4743 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
4744 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
4745 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
4746 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
4747 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
4748 // CHECK3-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
4749 // CHECK3-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
4750 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4751 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4752 // CHECK3-NEXT:    [[I3:%.*]] = alloca i32, align 4
4753 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
4754 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
4755 // CHECK3-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
4756 // CHECK3-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
4757 // CHECK3-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
4758 // CHECK3-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
4759 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
4760 // CHECK3-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
4761 // CHECK3-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
4762 // CHECK3-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
4763 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
4764 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
4765 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4766 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
4767 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
4768 // CHECK3-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
4769 // CHECK3-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
4770 // CHECK3-NEXT:    store i32 0, i32* [[I]], align 4
4771 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4772 // CHECK3-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
4773 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
4774 // CHECK3:       omp.precond.then:
4775 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
4776 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4777 // CHECK3-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
4778 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
4779 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
4780 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4781 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
4782 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
4783 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
4784 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4785 // CHECK3-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
4786 // CHECK3-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4787 // CHECK3:       cond.true:
4788 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4789 // CHECK3-NEXT:    br label [[COND_END:%.*]]
4790 // CHECK3:       cond.false:
4791 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
4792 // CHECK3-NEXT:    br label [[COND_END]]
4793 // CHECK3:       cond.end:
4794 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
4795 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
4796 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
4797 // CHECK3-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
4798 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
4799 // CHECK3:       omp.inner.for.cond:
4800 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
4801 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !11
4802 // CHECK3-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
4803 // CHECK3-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4804 // CHECK3:       omp.inner.for.body:
4805 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !11
4806 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !11
4807 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !11
4808 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
4809 // CHECK3:       omp.inner.for.inc:
4810 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
4811 // CHECK3-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !11
4812 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
4813 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
4814 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]]
4815 // CHECK3:       omp.inner.for.end:
4816 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
4817 // CHECK3:       omp.loop.exit:
4818 // CHECK3-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4819 // CHECK3-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
4820 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
4821 // CHECK3-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4822 // CHECK3-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
4823 // CHECK3-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4824 // CHECK3:       .omp.final.then:
4825 // CHECK3-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4826 // CHECK3-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
4827 // CHECK3-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
4828 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
4829 // CHECK3-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
4830 // CHECK3-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
4831 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
4832 // CHECK3:       .omp.final.done:
4833 // CHECK3-NEXT:    br label [[OMP_PRECOND_END]]
4834 // CHECK3:       omp.precond.end:
4835 // CHECK3-NEXT:    ret void
4836 //
4837 //
4838 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..1
4839 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
4840 // CHECK3-NEXT:  entry:
4841 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
4842 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
4843 // CHECK3-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
4844 // CHECK3-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
4845 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
4846 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
4847 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
4848 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
4849 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
4850 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
4851 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
4852 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
4853 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
4854 // CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
4855 // CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
4856 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4857 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4858 // CHECK3-NEXT:    [[I3:%.*]] = alloca i32, align 4
4859 // CHECK3-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 4
4860 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
4861 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
4862 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
4863 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
4864 // CHECK3-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
4865 // CHECK3-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
4866 // CHECK3-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
4867 // CHECK3-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
4868 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
4869 // CHECK3-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
4870 // CHECK3-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
4871 // CHECK3-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
4872 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
4873 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
4874 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4875 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
4876 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
4877 // CHECK3-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
4878 // CHECK3-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
4879 // CHECK3-NEXT:    store i32 0, i32* [[I]], align 4
4880 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4881 // CHECK3-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
4882 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
4883 // CHECK3:       omp.precond.then:
4884 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
4885 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4886 // CHECK3-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
4887 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
4888 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
4889 // CHECK3-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
4890 // CHECK3-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
4891 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
4892 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
4893 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4894 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
4895 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
4896 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4897 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4898 // CHECK3-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
4899 // CHECK3-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4900 // CHECK3:       cond.true:
4901 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4902 // CHECK3-NEXT:    br label [[COND_END:%.*]]
4903 // CHECK3:       cond.false:
4904 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4905 // CHECK3-NEXT:    br label [[COND_END]]
4906 // CHECK3:       cond.end:
4907 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
4908 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
4909 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
4910 // CHECK3-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
4911 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
4912 // CHECK3:       omp.inner.for.cond:
4913 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15
4914 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !15
4915 // CHECK3-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
4916 // CHECK3-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4917 // CHECK3:       omp.inner.for.body:
4918 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15
4919 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
4920 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
4921 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !15
4922 // CHECK3-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !15
4923 // CHECK3-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !15
4924 // CHECK3-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i32 [[TMP21]]
4925 // CHECK3-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !15
4926 // CHECK3-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !15
4927 // CHECK3-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !15
4928 // CHECK3-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
4929 // CHECK3-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !15
4930 // CHECK3-NEXT:    [[ADD7:%.*]] = fadd double [[TMP22]], [[TMP25]]
4931 // CHECK3-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !15
4932 // CHECK3-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !15
4933 // CHECK3-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
4934 // CHECK3-NEXT:    store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !15
4935 // CHECK3-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0
4936 // CHECK3-NEXT:    store double** [[TMP1]], double*** [[TMP28]], align 4, !llvm.access.group !15
4937 // CHECK3-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 1
4938 // CHECK3-NEXT:    store i32* [[I3]], i32** [[TMP29]], align 4, !llvm.access.group !15
4939 // CHECK3-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 2
4940 // CHECK3-NEXT:    store double** [[TMP2]], double*** [[TMP30]], align 4, !llvm.access.group !15
4941 // CHECK3-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 3
4942 // CHECK3-NEXT:    store double** [[TMP3]], double*** [[TMP31]], align 4, !llvm.access.group !15
4943 // CHECK3-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(%class.anon.0* nonnull align 4 dereferenceable(16) [[REF_TMP]]), !llvm.access.group !15
4944 // CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
4945 // CHECK3:       omp.body.continue:
4946 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
4947 // CHECK3:       omp.inner.for.inc:
4948 // CHECK3-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15
4949 // CHECK3-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP32]], 1
4950 // CHECK3-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15
4951 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]]
4952 // CHECK3:       omp.inner.for.end:
4953 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
4954 // CHECK3:       omp.loop.exit:
4955 // CHECK3-NEXT:    [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4956 // CHECK3-NEXT:    [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
4957 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
4958 // CHECK3-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4959 // CHECK3-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
4960 // CHECK3-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4961 // CHECK3:       .omp.final.then:
4962 // CHECK3-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4963 // CHECK3-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP37]], 0
4964 // CHECK3-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
4965 // CHECK3-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
4966 // CHECK3-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
4967 // CHECK3-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
4968 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
4969 // CHECK3:       .omp.final.done:
4970 // CHECK3-NEXT:    br label [[OMP_PRECOND_END]]
4971 // CHECK3:       omp.precond.end:
4972 // CHECK3-NEXT:    ret void
4973 //
4974 //
4975 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159
4976 // CHECK3-SAME: (i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2]] {
4977 // CHECK3-NEXT:  entry:
4978 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
4979 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
4980 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
4981 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
4982 // CHECK3-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
4983 // CHECK3-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
4984 // CHECK3-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
4985 // CHECK3-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
4986 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
4987 // CHECK3-NEXT:    ret void
4988 //
4989 //
4990 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..2
4991 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
4992 // CHECK3-NEXT:  entry:
4993 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
4994 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
4995 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
4996 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
4997 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
4998 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
4999 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
5000 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
5001 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
5002 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
5003 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
5004 // CHECK3-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
5005 // CHECK3-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
5006 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5007 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5008 // CHECK3-NEXT:    [[I3:%.*]] = alloca i32, align 4
5009 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
5010 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
5011 // CHECK3-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
5012 // CHECK3-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
5013 // CHECK3-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
5014 // CHECK3-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
5015 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
5016 // CHECK3-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
5017 // CHECK3-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
5018 // CHECK3-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
5019 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
5020 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
5021 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5022 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
5023 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
5024 // CHECK3-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
5025 // CHECK3-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
5026 // CHECK3-NEXT:    store i32 0, i32* [[I]], align 4
5027 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5028 // CHECK3-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
5029 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
5030 // CHECK3:       omp.precond.then:
5031 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
5032 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5033 // CHECK3-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
5034 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5035 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5036 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5037 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
5038 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
5039 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5040 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5041 // CHECK3-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
5042 // CHECK3-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5043 // CHECK3:       cond.true:
5044 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5045 // CHECK3-NEXT:    br label [[COND_END:%.*]]
5046 // CHECK3:       cond.false:
5047 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5048 // CHECK3-NEXT:    br label [[COND_END]]
5049 // CHECK3:       cond.end:
5050 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
5051 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
5052 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
5053 // CHECK3-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
5054 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5055 // CHECK3:       omp.inner.for.cond:
5056 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
5057 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !20
5058 // CHECK3-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
5059 // CHECK3-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5060 // CHECK3:       omp.inner.for.body:
5061 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !20
5062 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !20
5063 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !20
5064 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
5065 // CHECK3:       omp.inner.for.inc:
5066 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
5067 // CHECK3-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !20
5068 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
5069 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
5070 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]]
5071 // CHECK3:       omp.inner.for.end:
5072 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
5073 // CHECK3:       omp.loop.exit:
5074 // CHECK3-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5075 // CHECK3-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
5076 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
5077 // CHECK3-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
5078 // CHECK3-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
5079 // CHECK3-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5080 // CHECK3:       .omp.final.then:
5081 // CHECK3-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5082 // CHECK3-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
5083 // CHECK3-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
5084 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
5085 // CHECK3-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
5086 // CHECK3-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
5087 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
5088 // CHECK3:       .omp.final.done:
5089 // CHECK3-NEXT:    br label [[OMP_PRECOND_END]]
5090 // CHECK3:       omp.precond.end:
5091 // CHECK3-NEXT:    ret void
5092 //
5093 //
5094 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..3
5095 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
5096 // CHECK3-NEXT:  entry:
5097 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
5098 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
5099 // CHECK3-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
5100 // CHECK3-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
5101 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
5102 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
5103 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
5104 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
5105 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
5106 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
5107 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
5108 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
5109 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
5110 // CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
5111 // CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
5112 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5113 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5114 // CHECK3-NEXT:    [[I3:%.*]] = alloca i32, align 4
5115 // CHECK3-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_1:%.*]], align 4
5116 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
5117 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
5118 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
5119 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
5120 // CHECK3-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
5121 // CHECK3-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
5122 // CHECK3-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
5123 // CHECK3-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
5124 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
5125 // CHECK3-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
5126 // CHECK3-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
5127 // CHECK3-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
5128 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
5129 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
5130 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5131 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
5132 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
5133 // CHECK3-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
5134 // CHECK3-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
5135 // CHECK3-NEXT:    store i32 0, i32* [[I]], align 4
5136 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5137 // CHECK3-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
5138 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
5139 // CHECK3:       omp.precond.then:
5140 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
5141 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5142 // CHECK3-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
5143 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
5144 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
5145 // CHECK3-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
5146 // CHECK3-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
5147 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5148 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5149 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5150 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
5151 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
5152 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5153 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5154 // CHECK3-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
5155 // CHECK3-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5156 // CHECK3:       cond.true:
5157 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5158 // CHECK3-NEXT:    br label [[COND_END:%.*]]
5159 // CHECK3:       cond.false:
5160 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5161 // CHECK3-NEXT:    br label [[COND_END]]
5162 // CHECK3:       cond.end:
5163 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
5164 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
5165 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
5166 // CHECK3-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
5167 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5168 // CHECK3:       omp.inner.for.cond:
5169 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
5170 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !23
5171 // CHECK3-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
5172 // CHECK3-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5173 // CHECK3:       omp.inner.for.body:
5174 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
5175 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
5176 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
5177 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !23
5178 // CHECK3-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !23
5179 // CHECK3-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !23
5180 // CHECK3-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i32 [[TMP21]]
5181 // CHECK3-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !23
5182 // CHECK3-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !23
5183 // CHECK3-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !23
5184 // CHECK3-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
5185 // CHECK3-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !23
5186 // CHECK3-NEXT:    [[ADD7:%.*]] = fadd double [[TMP22]], [[TMP25]]
5187 // CHECK3-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !23
5188 // CHECK3-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !23
5189 // CHECK3-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
5190 // CHECK3-NEXT:    store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !23
5191 // CHECK3-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 0
5192 // CHECK3-NEXT:    store double** [[TMP1]], double*** [[TMP28]], align 4, !llvm.access.group !23
5193 // CHECK3-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 1
5194 // CHECK3-NEXT:    store i32* [[I3]], i32** [[TMP29]], align 4, !llvm.access.group !23
5195 // CHECK3-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 2
5196 // CHECK3-NEXT:    store double** [[TMP2]], double*** [[TMP30]], align 4, !llvm.access.group !23
5197 // CHECK3-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 3
5198 // CHECK3-NEXT:    store double** [[TMP3]], double*** [[TMP31]], align 4, !llvm.access.group !23
5199 // CHECK3-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE0_clEv"(%class.anon.1* nonnull align 4 dereferenceable(16) [[REF_TMP]]), !llvm.access.group !23
5200 // CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
5201 // CHECK3:       omp.body.continue:
5202 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
5203 // CHECK3:       omp.inner.for.inc:
5204 // CHECK3-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
5205 // CHECK3-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP32]], 1
5206 // CHECK3-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
5207 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]]
5208 // CHECK3:       omp.inner.for.end:
5209 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
5210 // CHECK3:       omp.loop.exit:
5211 // CHECK3-NEXT:    [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5212 // CHECK3-NEXT:    [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
5213 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
5214 // CHECK3-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
5215 // CHECK3-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
5216 // CHECK3-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5217 // CHECK3:       .omp.final.then:
5218 // CHECK3-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5219 // CHECK3-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP37]], 0
5220 // CHECK3-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
5221 // CHECK3-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
5222 // CHECK3-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
5223 // CHECK3-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
5224 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
5225 // CHECK3:       .omp.final.done:
5226 // CHECK3-NEXT:    br label [[OMP_PRECOND_END]]
5227 // CHECK3:       omp.precond.end:
5228 // CHECK3-NEXT:    ret void
5229 //
5230 //
5231 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l201
5232 // CHECK3-SAME: (i32 [[CH:%.*]], i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2]] {
5233 // CHECK3-NEXT:  entry:
5234 // CHECK3-NEXT:    [[CH_ADDR:%.*]] = alloca i32, align 4
5235 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
5236 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
5237 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
5238 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
5239 // CHECK3-NEXT:    store i32 [[CH]], i32* [[CH_ADDR]], align 4
5240 // CHECK3-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
5241 // CHECK3-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
5242 // CHECK3-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
5243 // CHECK3-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
5244 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
5245 // CHECK3-NEXT:    ret void
5246 //
5247 //
5248 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..6
5249 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
5250 // CHECK3-NEXT:  entry:
5251 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
5252 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
5253 // CHECK3-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 4
5254 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
5255 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
5256 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
5257 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
5258 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
5259 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
5260 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
5261 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
5262 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
5263 // CHECK3-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
5264 // CHECK3-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
5265 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5266 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5267 // CHECK3-NEXT:    [[I3:%.*]] = alloca i32, align 4
5268 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
5269 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
5270 // CHECK3-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 4
5271 // CHECK3-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
5272 // CHECK3-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
5273 // CHECK3-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
5274 // CHECK3-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
5275 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
5276 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
5277 // CHECK3-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 4
5278 // CHECK3-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 4
5279 // CHECK3-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 4
5280 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP1]], align 4
5281 // CHECK3-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
5282 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5283 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP6]], 0
5284 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
5285 // CHECK3-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
5286 // CHECK3-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
5287 // CHECK3-NEXT:    store i32 0, i32* [[I]], align 4
5288 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5289 // CHECK3-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP7]]
5290 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
5291 // CHECK3:       omp.precond.then:
5292 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
5293 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5294 // CHECK3-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_COMB_UB]], align 4
5295 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5296 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5297 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP0]], align 4
5298 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5299 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
5300 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP9]])
5301 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5302 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5303 // CHECK3-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
5304 // CHECK3-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5305 // CHECK3:       cond.true:
5306 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5307 // CHECK3-NEXT:    br label [[COND_END:%.*]]
5308 // CHECK3:       cond.false:
5309 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5310 // CHECK3-NEXT:    br label [[COND_END]]
5311 // CHECK3:       cond.end:
5312 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
5313 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
5314 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
5315 // CHECK3-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
5316 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5317 // CHECK3:       omp.inner.for.cond:
5318 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
5319 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !26
5320 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], 1
5321 // CHECK3-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP17]], [[ADD]]
5322 // CHECK3-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5323 // CHECK3:       omp.inner.for.body:
5324 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !26
5325 // CHECK3-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26
5326 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]]), !llvm.access.group !26
5327 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
5328 // CHECK3:       omp.inner.for.inc:
5329 // CHECK3-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
5330 // CHECK3-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !26
5331 // CHECK3-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
5332 // CHECK3-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
5333 // CHECK3-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !26
5334 // CHECK3-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !26
5335 // CHECK3-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
5336 // CHECK3-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !26
5337 // CHECK3-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26
5338 // CHECK3-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !26
5339 // CHECK3-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
5340 // CHECK3-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26
5341 // CHECK3-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26
5342 // CHECK3-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !26
5343 // CHECK3-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[TMP27]], [[TMP28]]
5344 // CHECK3-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
5345 // CHECK3:       cond.true10:
5346 // CHECK3-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !26
5347 // CHECK3-NEXT:    br label [[COND_END12:%.*]]
5348 // CHECK3:       cond.false11:
5349 // CHECK3-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26
5350 // CHECK3-NEXT:    br label [[COND_END12]]
5351 // CHECK3:       cond.end12:
5352 // CHECK3-NEXT:    [[COND13:%.*]] = phi i32 [ [[TMP29]], [[COND_TRUE10]] ], [ [[TMP30]], [[COND_FALSE11]] ]
5353 // CHECK3-NEXT:    store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26
5354 // CHECK3-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !26
5355 // CHECK3-NEXT:    store i32 [[TMP31]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
5356 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]]
5357 // CHECK3:       omp.inner.for.end:
5358 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
5359 // CHECK3:       omp.loop.exit:
5360 // CHECK3-NEXT:    [[TMP32:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5361 // CHECK3-NEXT:    [[TMP33:%.*]] = load i32, i32* [[TMP32]], align 4
5362 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP33]])
5363 // CHECK3-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
5364 // CHECK3-NEXT:    [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
5365 // CHECK3-NEXT:    br i1 [[TMP35]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5366 // CHECK3:       .omp.final.then:
5367 // CHECK3-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5368 // CHECK3-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP36]], 0
5369 // CHECK3-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
5370 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV15]], 1
5371 // CHECK3-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL]]
5372 // CHECK3-NEXT:    store i32 [[ADD16]], i32* [[I3]], align 4
5373 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
5374 // CHECK3:       .omp.final.done:
5375 // CHECK3-NEXT:    br label [[OMP_PRECOND_END]]
5376 // CHECK3:       omp.precond.end:
5377 // CHECK3-NEXT:    ret void
5378 //
5379 //
5380 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..7
5381 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
5382 // CHECK3-NEXT:  entry:
5383 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
5384 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
5385 // CHECK3-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
5386 // CHECK3-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
5387 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
5388 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
5389 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
5390 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
5391 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
5392 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
5393 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
5394 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
5395 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
5396 // CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
5397 // CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
5398 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5399 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5400 // CHECK3-NEXT:    [[I3:%.*]] = alloca i32, align 4
5401 // CHECK3-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_2:%.*]], align 4
5402 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
5403 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
5404 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
5405 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
5406 // CHECK3-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
5407 // CHECK3-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
5408 // CHECK3-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
5409 // CHECK3-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
5410 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
5411 // CHECK3-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
5412 // CHECK3-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
5413 // CHECK3-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
5414 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
5415 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
5416 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5417 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
5418 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
5419 // CHECK3-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
5420 // CHECK3-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
5421 // CHECK3-NEXT:    store i32 0, i32* [[I]], align 4
5422 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5423 // CHECK3-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
5424 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
5425 // CHECK3:       omp.precond.then:
5426 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
5427 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5428 // CHECK3-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
5429 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
5430 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
5431 // CHECK3-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
5432 // CHECK3-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
5433 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5434 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5435 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5436 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
5437 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
5438 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5439 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5440 // CHECK3-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
5441 // CHECK3-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5442 // CHECK3:       cond.true:
5443 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5444 // CHECK3-NEXT:    br label [[COND_END:%.*]]
5445 // CHECK3:       cond.false:
5446 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5447 // CHECK3-NEXT:    br label [[COND_END]]
5448 // CHECK3:       cond.end:
5449 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
5450 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
5451 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
5452 // CHECK3-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
5453 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5454 // CHECK3:       omp.inner.for.cond:
5455 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
5456 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !29
5457 // CHECK3-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
5458 // CHECK3-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5459 // CHECK3:       omp.inner.for.body:
5460 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
5461 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
5462 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
5463 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !29
5464 // CHECK3-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !29
5465 // CHECK3-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !29
5466 // CHECK3-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i32 [[TMP21]]
5467 // CHECK3-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !29
5468 // CHECK3-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !29
5469 // CHECK3-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !29
5470 // CHECK3-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
5471 // CHECK3-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !29
5472 // CHECK3-NEXT:    [[ADD7:%.*]] = fadd double [[TMP22]], [[TMP25]]
5473 // CHECK3-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !29
5474 // CHECK3-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !29
5475 // CHECK3-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
5476 // CHECK3-NEXT:    store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !29
5477 // CHECK3-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[CLASS_ANON_2]], %class.anon.2* [[REF_TMP]], i32 0, i32 0
5478 // CHECK3-NEXT:    store double** [[TMP1]], double*** [[TMP28]], align 4, !llvm.access.group !29
5479 // CHECK3-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_2]], %class.anon.2* [[REF_TMP]], i32 0, i32 1
5480 // CHECK3-NEXT:    store i32* [[I3]], i32** [[TMP29]], align 4, !llvm.access.group !29
5481 // CHECK3-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_2]], %class.anon.2* [[REF_TMP]], i32 0, i32 2
5482 // CHECK3-NEXT:    store double** [[TMP2]], double*** [[TMP30]], align 4, !llvm.access.group !29
5483 // CHECK3-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_2]], %class.anon.2* [[REF_TMP]], i32 0, i32 3
5484 // CHECK3-NEXT:    store double** [[TMP3]], double*** [[TMP31]], align 4, !llvm.access.group !29
5485 // CHECK3-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE1_clEv"(%class.anon.2* nonnull align 4 dereferenceable(16) [[REF_TMP]]), !llvm.access.group !29
5486 // CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
5487 // CHECK3:       omp.body.continue:
5488 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
5489 // CHECK3:       omp.inner.for.inc:
5490 // CHECK3-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
5491 // CHECK3-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP32]], 1
5492 // CHECK3-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
5493 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]]
5494 // CHECK3:       omp.inner.for.end:
5495 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
5496 // CHECK3:       omp.loop.exit:
5497 // CHECK3-NEXT:    [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5498 // CHECK3-NEXT:    [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
5499 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
5500 // CHECK3-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
5501 // CHECK3-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
5502 // CHECK3-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5503 // CHECK3:       .omp.final.then:
5504 // CHECK3-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5505 // CHECK3-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP37]], 0
5506 // CHECK3-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
5507 // CHECK3-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
5508 // CHECK3-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
5509 // CHECK3-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
5510 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
5511 // CHECK3:       .omp.final.done:
5512 // CHECK3-NEXT:    br label [[OMP_PRECOND_END]]
5513 // CHECK3:       omp.precond.end:
5514 // CHECK3-NEXT:    ret void
5515 //
5516 //
5517 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l234
5518 // CHECK3-SAME: (i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2]] {
5519 // CHECK3-NEXT:  entry:
5520 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
5521 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
5522 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
5523 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
5524 // CHECK3-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
5525 // CHECK3-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
5526 // CHECK3-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
5527 // CHECK3-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
5528 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
5529 // CHECK3-NEXT:    ret void
5530 //
5531 //
5532 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..10
5533 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
5534 // CHECK3-NEXT:  entry:
5535 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
5536 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
5537 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
5538 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
5539 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
5540 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
5541 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
5542 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
5543 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
5544 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
5545 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
5546 // CHECK3-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
5547 // CHECK3-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
5548 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5549 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5550 // CHECK3-NEXT:    [[I3:%.*]] = alloca i32, align 4
5551 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
5552 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
5553 // CHECK3-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
5554 // CHECK3-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
5555 // CHECK3-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
5556 // CHECK3-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
5557 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
5558 // CHECK3-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
5559 // CHECK3-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
5560 // CHECK3-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
5561 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
5562 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
5563 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5564 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
5565 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
5566 // CHECK3-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
5567 // CHECK3-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
5568 // CHECK3-NEXT:    store i32 0, i32* [[I]], align 4
5569 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5570 // CHECK3-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
5571 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
5572 // CHECK3:       omp.precond.then:
5573 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
5574 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5575 // CHECK3-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
5576 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5577 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5578 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5579 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
5580 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
5581 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5582 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5583 // CHECK3-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
5584 // CHECK3-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5585 // CHECK3:       cond.true:
5586 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5587 // CHECK3-NEXT:    br label [[COND_END:%.*]]
5588 // CHECK3:       cond.false:
5589 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5590 // CHECK3-NEXT:    br label [[COND_END]]
5591 // CHECK3:       cond.end:
5592 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
5593 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
5594 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
5595 // CHECK3-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
5596 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5597 // CHECK3:       omp.inner.for.cond:
5598 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
5599 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32
5600 // CHECK3-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
5601 // CHECK3-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5602 // CHECK3:       omp.inner.for.body:
5603 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !32
5604 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32
5605 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !32
5606 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
5607 // CHECK3:       omp.inner.for.inc:
5608 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
5609 // CHECK3-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !32
5610 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
5611 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
5612 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]]
5613 // CHECK3:       omp.inner.for.end:
5614 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
5615 // CHECK3:       omp.loop.exit:
5616 // CHECK3-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5617 // CHECK3-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
5618 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
5619 // CHECK3-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
5620 // CHECK3-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
5621 // CHECK3-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5622 // CHECK3:       .omp.final.then:
5623 // CHECK3-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5624 // CHECK3-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
5625 // CHECK3-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
5626 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
5627 // CHECK3-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
5628 // CHECK3-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
5629 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
5630 // CHECK3:       .omp.final.done:
5631 // CHECK3-NEXT:    br label [[OMP_PRECOND_END]]
5632 // CHECK3:       omp.precond.end:
5633 // CHECK3-NEXT:    ret void
5634 //
5635 //
5636 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..11
5637 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
5638 // CHECK3-NEXT:  entry:
5639 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
5640 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
5641 // CHECK3-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
5642 // CHECK3-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
5643 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
5644 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
5645 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
5646 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
5647 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
5648 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
5649 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
5650 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
5651 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
5652 // CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
5653 // CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
5654 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5655 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5656 // CHECK3-NEXT:    [[I3:%.*]] = alloca i32, align 4
5657 // CHECK3-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_3:%.*]], align 4
5658 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
5659 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
5660 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
5661 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
5662 // CHECK3-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
5663 // CHECK3-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
5664 // CHECK3-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
5665 // CHECK3-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
5666 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
5667 // CHECK3-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
5668 // CHECK3-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
5669 // CHECK3-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
5670 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
5671 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
5672 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5673 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
5674 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
5675 // CHECK3-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
5676 // CHECK3-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
5677 // CHECK3-NEXT:    store i32 0, i32* [[I]], align 4
5678 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5679 // CHECK3-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
5680 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
5681 // CHECK3:       omp.precond.then:
5682 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
5683 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5684 // CHECK3-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
5685 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
5686 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
5687 // CHECK3-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
5688 // CHECK3-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
5689 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5690 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5691 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5692 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
5693 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
5694 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5695 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5696 // CHECK3-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
5697 // CHECK3-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5698 // CHECK3:       cond.true:
5699 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5700 // CHECK3-NEXT:    br label [[COND_END:%.*]]
5701 // CHECK3:       cond.false:
5702 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5703 // CHECK3-NEXT:    br label [[COND_END]]
5704 // CHECK3:       cond.end:
5705 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
5706 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
5707 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
5708 // CHECK3-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
5709 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5710 // CHECK3:       omp.inner.for.cond:
5711 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
5712 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !35
5713 // CHECK3-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
5714 // CHECK3-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5715 // CHECK3:       omp.inner.for.body:
5716 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
5717 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
5718 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
5719 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !35
5720 // CHECK3-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !35
5721 // CHECK3-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !35
5722 // CHECK3-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i32 [[TMP21]]
5723 // CHECK3-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !35
5724 // CHECK3-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !35
5725 // CHECK3-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !35
5726 // CHECK3-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
5727 // CHECK3-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !35
5728 // CHECK3-NEXT:    [[ADD7:%.*]] = fadd double [[TMP22]], [[TMP25]]
5729 // CHECK3-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !35
5730 // CHECK3-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !35
5731 // CHECK3-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
5732 // CHECK3-NEXT:    store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !35
5733 // CHECK3-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[CLASS_ANON_3]], %class.anon.3* [[REF_TMP]], i32 0, i32 0
5734 // CHECK3-NEXT:    store double** [[TMP1]], double*** [[TMP28]], align 4, !llvm.access.group !35
5735 // CHECK3-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_3]], %class.anon.3* [[REF_TMP]], i32 0, i32 1
5736 // CHECK3-NEXT:    store i32* [[I3]], i32** [[TMP29]], align 4, !llvm.access.group !35
5737 // CHECK3-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_3]], %class.anon.3* [[REF_TMP]], i32 0, i32 2
5738 // CHECK3-NEXT:    store double** [[TMP2]], double*** [[TMP30]], align 4, !llvm.access.group !35
5739 // CHECK3-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_3]], %class.anon.3* [[REF_TMP]], i32 0, i32 3
5740 // CHECK3-NEXT:    store double** [[TMP3]], double*** [[TMP31]], align 4, !llvm.access.group !35
5741 // CHECK3-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE2_clEv"(%class.anon.3* nonnull align 4 dereferenceable(16) [[REF_TMP]]), !llvm.access.group !35
5742 // CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
5743 // CHECK3:       omp.body.continue:
5744 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
5745 // CHECK3:       omp.inner.for.inc:
5746 // CHECK3-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
5747 // CHECK3-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP32]], 1
5748 // CHECK3-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
5749 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]]
5750 // CHECK3:       omp.inner.for.end:
5751 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
5752 // CHECK3:       omp.loop.exit:
5753 // CHECK3-NEXT:    [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5754 // CHECK3-NEXT:    [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
5755 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
5756 // CHECK3-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
5757 // CHECK3-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
5758 // CHECK3-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5759 // CHECK3:       .omp.final.then:
5760 // CHECK3-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5761 // CHECK3-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP37]], 0
5762 // CHECK3-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
5763 // CHECK3-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
5764 // CHECK3-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
5765 // CHECK3-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
5766 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
5767 // CHECK3:       .omp.final.done:
5768 // CHECK3-NEXT:    br label [[OMP_PRECOND_END]]
5769 // CHECK3:       omp.precond.end:
5770 // CHECK3-NEXT:    ret void
5771 //
5772 //
5773 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l266
5774 // CHECK3-SAME: (i32 [[CH:%.*]], i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2]] {
5775 // CHECK3-NEXT:  entry:
5776 // CHECK3-NEXT:    [[CH_ADDR:%.*]] = alloca i32, align 4
5777 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
5778 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
5779 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
5780 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
5781 // CHECK3-NEXT:    store i32 [[CH]], i32* [[CH_ADDR]], align 4
5782 // CHECK3-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
5783 // CHECK3-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
5784 // CHECK3-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
5785 // CHECK3-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
5786 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
5787 // CHECK3-NEXT:    ret void
5788 //
5789 //
5790 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..14
5791 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
5792 // CHECK3-NEXT:  entry:
5793 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
5794 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
5795 // CHECK3-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 4
5796 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
5797 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
5798 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
5799 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
5800 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
5801 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
5802 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
5803 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
5804 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
5805 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
5806 // CHECK3-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
5807 // CHECK3-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
5808 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5809 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5810 // CHECK3-NEXT:    [[I4:%.*]] = alloca i32, align 4
5811 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
5812 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
5813 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
5814 // CHECK3-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 4
5815 // CHECK3-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
5816 // CHECK3-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
5817 // CHECK3-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
5818 // CHECK3-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
5819 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
5820 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
5821 // CHECK3-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 4
5822 // CHECK3-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 4
5823 // CHECK3-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 4
5824 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
5825 // CHECK3-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
5826 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
5827 // CHECK3-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
5828 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5829 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
5830 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
5831 // CHECK3-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
5832 // CHECK3-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
5833 // CHECK3-NEXT:    store i32 0, i32* [[I]], align 4
5834 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5835 // CHECK3-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
5836 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
5837 // CHECK3:       omp.precond.then:
5838 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
5839 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
5840 // CHECK3-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
5841 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5842 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5843 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5844 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
5845 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
5846 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5847 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
5848 // CHECK3-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
5849 // CHECK3-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5850 // CHECK3:       cond.true:
5851 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
5852 // CHECK3-NEXT:    br label [[COND_END:%.*]]
5853 // CHECK3:       cond.false:
5854 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5855 // CHECK3-NEXT:    br label [[COND_END]]
5856 // CHECK3:       cond.end:
5857 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
5858 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
5859 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
5860 // CHECK3-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
5861 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5862 // CHECK3:       omp.inner.for.cond:
5863 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
5864 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !38
5865 // CHECK3-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
5866 // CHECK3-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5867 // CHECK3:       omp.inner.for.body:
5868 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !38
5869 // CHECK3-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !38
5870 // CHECK3-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !38
5871 // CHECK3-NEXT:    store i32 [[TMP21]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !38
5872 // CHECK3-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !38
5873 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**, i32)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]], i32 [[TMP22]]), !llvm.access.group !38
5874 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
5875 // CHECK3:       omp.inner.for.inc:
5876 // CHECK3-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
5877 // CHECK3-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !38
5878 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
5879 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
5880 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP39:![0-9]+]]
5881 // CHECK3:       omp.inner.for.end:
5882 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
5883 // CHECK3:       omp.loop.exit:
5884 // CHECK3-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5885 // CHECK3-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
5886 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
5887 // CHECK3-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
5888 // CHECK3-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
5889 // CHECK3-NEXT:    br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5890 // CHECK3:       .omp.final.then:
5891 // CHECK3-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5892 // CHECK3-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP29]], 0
5893 // CHECK3-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
5894 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
5895 // CHECK3-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
5896 // CHECK3-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
5897 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
5898 // CHECK3:       .omp.final.done:
5899 // CHECK3-NEXT:    br label [[OMP_PRECOND_END]]
5900 // CHECK3:       omp.precond.end:
5901 // CHECK3-NEXT:    ret void
5902 //
5903 //
5904 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..15
5905 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
5906 // CHECK3-NEXT:  entry:
5907 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
5908 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
5909 // CHECK3-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
5910 // CHECK3-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
5911 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
5912 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
5913 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
5914 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
5915 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
5916 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
5917 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
5918 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
5919 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
5920 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
5921 // CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
5922 // CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
5923 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5924 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5925 // CHECK3-NEXT:    [[I4:%.*]] = alloca i32, align 4
5926 // CHECK3-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_4:%.*]], align 4
5927 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
5928 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
5929 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
5930 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
5931 // CHECK3-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
5932 // CHECK3-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
5933 // CHECK3-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
5934 // CHECK3-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
5935 // CHECK3-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
5936 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
5937 // CHECK3-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
5938 // CHECK3-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
5939 // CHECK3-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
5940 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
5941 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
5942 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5943 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
5944 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
5945 // CHECK3-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
5946 // CHECK3-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
5947 // CHECK3-NEXT:    store i32 0, i32* [[I]], align 4
5948 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5949 // CHECK3-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
5950 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
5951 // CHECK3:       omp.precond.then:
5952 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
5953 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
5954 // CHECK3-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
5955 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
5956 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
5957 // CHECK3-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
5958 // CHECK3-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
5959 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5960 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5961 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
5962 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5963 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
5964 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP12]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP10]])
5965 // CHECK3-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
5966 // CHECK3:       omp.dispatch.cond:
5967 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5968 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
5969 // CHECK3-NEXT:    [[CMP5:%.*]] = icmp ugt i32 [[TMP13]], [[TMP14]]
5970 // CHECK3-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5971 // CHECK3:       cond.true:
5972 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
5973 // CHECK3-NEXT:    br label [[COND_END:%.*]]
5974 // CHECK3:       cond.false:
5975 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5976 // CHECK3-NEXT:    br label [[COND_END]]
5977 // CHECK3:       cond.end:
5978 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP15]], [[COND_TRUE]] ], [ [[TMP16]], [[COND_FALSE]] ]
5979 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
5980 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
5981 // CHECK3-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
5982 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
5983 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5984 // CHECK3-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
5985 // CHECK3-NEXT:    br i1 [[CMP6]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
5986 // CHECK3:       omp.dispatch.body:
5987 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5988 // CHECK3:       omp.inner.for.cond:
5989 // CHECK3-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
5990 // CHECK3-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !41
5991 // CHECK3-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
5992 // CHECK3-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5993 // CHECK3:       omp.inner.for.body:
5994 // CHECK3-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
5995 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP22]], 1
5996 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
5997 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !41
5998 // CHECK3-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !41
5999 // CHECK3-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !41
6000 // CHECK3-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
6001 // CHECK3-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !41
6002 // CHECK3-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !41
6003 // CHECK3-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !41
6004 // CHECK3-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
6005 // CHECK3-NEXT:    [[TMP28:%.*]] = load double, double* [[ARRAYIDX8]], align 4, !llvm.access.group !41
6006 // CHECK3-NEXT:    [[ADD9:%.*]] = fadd double [[TMP25]], [[TMP28]]
6007 // CHECK3-NEXT:    [[TMP29:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !41
6008 // CHECK3-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !41
6009 // CHECK3-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds double, double* [[TMP29]], i32 [[TMP30]]
6010 // CHECK3-NEXT:    store double [[ADD9]], double* [[ARRAYIDX10]], align 4, !llvm.access.group !41
6011 // CHECK3-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_4]], %class.anon.4* [[REF_TMP]], i32 0, i32 0
6012 // CHECK3-NEXT:    store double** [[TMP1]], double*** [[TMP31]], align 4, !llvm.access.group !41
6013 // CHECK3-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[CLASS_ANON_4]], %class.anon.4* [[REF_TMP]], i32 0, i32 1
6014 // CHECK3-NEXT:    store i32* [[I4]], i32** [[TMP32]], align 4, !llvm.access.group !41
6015 // CHECK3-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [[CLASS_ANON_4]], %class.anon.4* [[REF_TMP]], i32 0, i32 2
6016 // CHECK3-NEXT:    store double** [[TMP2]], double*** [[TMP33]], align 4, !llvm.access.group !41
6017 // CHECK3-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [[CLASS_ANON_4]], %class.anon.4* [[REF_TMP]], i32 0, i32 3
6018 // CHECK3-NEXT:    store double** [[TMP3]], double*** [[TMP34]], align 4, !llvm.access.group !41
6019 // CHECK3-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE3_clEv"(%class.anon.4* nonnull align 4 dereferenceable(16) [[REF_TMP]]), !llvm.access.group !41
6020 // CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
6021 // CHECK3:       omp.body.continue:
6022 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6023 // CHECK3:       omp.inner.for.inc:
6024 // CHECK3-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
6025 // CHECK3-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP35]], 1
6026 // CHECK3-NEXT:    store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
6027 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP42:![0-9]+]]
6028 // CHECK3:       omp.inner.for.end:
6029 // CHECK3-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
6030 // CHECK3:       omp.dispatch.inc:
6031 // CHECK3-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
6032 // CHECK3-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
6033 // CHECK3-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP36]], [[TMP37]]
6034 // CHECK3-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_LB]], align 4
6035 // CHECK3-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6036 // CHECK3-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
6037 // CHECK3-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP38]], [[TMP39]]
6038 // CHECK3-NEXT:    store i32 [[ADD13]], i32* [[DOTOMP_UB]], align 4
6039 // CHECK3-NEXT:    br label [[OMP_DISPATCH_COND]]
6040 // CHECK3:       omp.dispatch.end:
6041 // CHECK3-NEXT:    [[TMP40:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
6042 // CHECK3-NEXT:    [[TMP41:%.*]] = load i32, i32* [[TMP40]], align 4
6043 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP41]])
6044 // CHECK3-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
6045 // CHECK3-NEXT:    [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0
6046 // CHECK3-NEXT:    br i1 [[TMP43]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6047 // CHECK3:       .omp.final.then:
6048 // CHECK3-NEXT:    [[TMP44:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6049 // CHECK3-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP44]], 0
6050 // CHECK3-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
6051 // CHECK3-NEXT:    [[MUL16:%.*]] = mul nsw i32 [[DIV15]], 1
6052 // CHECK3-NEXT:    [[ADD17:%.*]] = add nsw i32 0, [[MUL16]]
6053 // CHECK3-NEXT:    store i32 [[ADD17]], i32* [[I4]], align 4
6054 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
6055 // CHECK3:       .omp.final.done:
6056 // CHECK3-NEXT:    br label [[OMP_PRECOND_END]]
6057 // CHECK3:       omp.precond.end:
6058 // CHECK3-NEXT:    ret void
6059 //
6060 //
6061 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l299
6062 // CHECK3-SAME: (i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2]] {
6063 // CHECK3-NEXT:  entry:
6064 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
6065 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
6066 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
6067 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
6068 // CHECK3-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
6069 // CHECK3-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
6070 // CHECK3-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
6071 // CHECK3-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
6072 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
6073 // CHECK3-NEXT:    ret void
6074 //
6075 //
6076 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..18
6077 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
6078 // CHECK3-NEXT:  entry:
6079 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
6080 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
6081 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
6082 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
6083 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
6084 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
6085 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6086 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
6087 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
6088 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
6089 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
6090 // CHECK3-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
6091 // CHECK3-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
6092 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6093 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6094 // CHECK3-NEXT:    [[I3:%.*]] = alloca i32, align 4
6095 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
6096 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
6097 // CHECK3-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
6098 // CHECK3-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
6099 // CHECK3-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
6100 // CHECK3-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
6101 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
6102 // CHECK3-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
6103 // CHECK3-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
6104 // CHECK3-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
6105 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
6106 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
6107 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6108 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
6109 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
6110 // CHECK3-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
6111 // CHECK3-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
6112 // CHECK3-NEXT:    store i32 0, i32* [[I]], align 4
6113 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6114 // CHECK3-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
6115 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
6116 // CHECK3:       omp.precond.then:
6117 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
6118 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6119 // CHECK3-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
6120 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6121 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6122 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
6123 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
6124 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
6125 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6126 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6127 // CHECK3-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
6128 // CHECK3-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
6129 // CHECK3:       cond.true:
6130 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6131 // CHECK3-NEXT:    br label [[COND_END:%.*]]
6132 // CHECK3:       cond.false:
6133 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6134 // CHECK3-NEXT:    br label [[COND_END]]
6135 // CHECK3:       cond.end:
6136 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
6137 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
6138 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
6139 // CHECK3-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
6140 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
6141 // CHECK3:       omp.inner.for.cond:
6142 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
6143 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !44
6144 // CHECK3-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
6145 // CHECK3-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6146 // CHECK3:       omp.inner.for.body:
6147 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !44
6148 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !44
6149 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !44
6150 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6151 // CHECK3:       omp.inner.for.inc:
6152 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
6153 // CHECK3-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !44
6154 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
6155 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
6156 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP45:![0-9]+]]
6157 // CHECK3:       omp.inner.for.end:
6158 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
6159 // CHECK3:       omp.loop.exit:
6160 // CHECK3-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
6161 // CHECK3-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
6162 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
6163 // CHECK3-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
6164 // CHECK3-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
6165 // CHECK3-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6166 // CHECK3:       .omp.final.then:
6167 // CHECK3-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6168 // CHECK3-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
6169 // CHECK3-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
6170 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
6171 // CHECK3-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
6172 // CHECK3-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
6173 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
6174 // CHECK3:       .omp.final.done:
6175 // CHECK3-NEXT:    br label [[OMP_PRECOND_END]]
6176 // CHECK3:       omp.precond.end:
6177 // CHECK3-NEXT:    ret void
6178 //
6179 //
6180 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..19
6181 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
6182 // CHECK3-NEXT:  entry:
6183 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
6184 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
6185 // CHECK3-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
6186 // CHECK3-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
6187 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
6188 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
6189 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
6190 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
6191 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6192 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
6193 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
6194 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
6195 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
6196 // CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
6197 // CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
6198 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6199 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6200 // CHECK3-NEXT:    [[I3:%.*]] = alloca i32, align 4
6201 // CHECK3-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_5:%.*]], align 4
6202 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
6203 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
6204 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
6205 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
6206 // CHECK3-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
6207 // CHECK3-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
6208 // CHECK3-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
6209 // CHECK3-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
6210 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
6211 // CHECK3-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
6212 // CHECK3-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
6213 // CHECK3-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
6214 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
6215 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
6216 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6217 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
6218 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
6219 // CHECK3-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
6220 // CHECK3-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
6221 // CHECK3-NEXT:    store i32 0, i32* [[I]], align 4
6222 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6223 // CHECK3-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
6224 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
6225 // CHECK3:       omp.precond.then:
6226 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
6227 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6228 // CHECK3-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
6229 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
6230 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
6231 // CHECK3-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
6232 // CHECK3-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
6233 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6234 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6235 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
6236 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6237 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
6238 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
6239 // CHECK3-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32 35, i32 [[TMP10]], i32 [[TMP11]], i32 1, i32 1)
6240 // CHECK3-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
6241 // CHECK3:       omp.dispatch.cond:
6242 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
6243 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4
6244 // CHECK3-NEXT:    [[TMP16:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP15]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
6245 // CHECK3-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP16]], 0
6246 // CHECK3-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
6247 // CHECK3:       omp.dispatch.body:
6248 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
6249 // CHECK3-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
6250 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
6251 // CHECK3:       omp.inner.for.cond:
6252 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
6253 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !47
6254 // CHECK3-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
6255 // CHECK3-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6256 // CHECK3:       omp.inner.for.body:
6257 // CHECK3-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
6258 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP20]], 1
6259 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
6260 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !47
6261 // CHECK3-NEXT:    [[TMP21:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !47
6262 // CHECK3-NEXT:    [[TMP22:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !47
6263 // CHECK3-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP21]], i32 [[TMP22]]
6264 // CHECK3-NEXT:    [[TMP23:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !47
6265 // CHECK3-NEXT:    [[TMP24:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !47
6266 // CHECK3-NEXT:    [[TMP25:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !47
6267 // CHECK3-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds double, double* [[TMP24]], i32 [[TMP25]]
6268 // CHECK3-NEXT:    [[TMP26:%.*]] = load double, double* [[ARRAYIDX5]], align 4, !llvm.access.group !47
6269 // CHECK3-NEXT:    [[ADD6:%.*]] = fadd double [[TMP23]], [[TMP26]]
6270 // CHECK3-NEXT:    [[TMP27:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !47
6271 // CHECK3-NEXT:    [[TMP28:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !47
6272 // CHECK3-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds double, double* [[TMP27]], i32 [[TMP28]]
6273 // CHECK3-NEXT:    store double [[ADD6]], double* [[ARRAYIDX7]], align 4, !llvm.access.group !47
6274 // CHECK3-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_5]], %class.anon.5* [[REF_TMP]], i32 0, i32 0
6275 // CHECK3-NEXT:    store double** [[TMP1]], double*** [[TMP29]], align 4, !llvm.access.group !47
6276 // CHECK3-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_5]], %class.anon.5* [[REF_TMP]], i32 0, i32 1
6277 // CHECK3-NEXT:    store i32* [[I3]], i32** [[TMP30]], align 4, !llvm.access.group !47
6278 // CHECK3-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_5]], %class.anon.5* [[REF_TMP]], i32 0, i32 2
6279 // CHECK3-NEXT:    store double** [[TMP2]], double*** [[TMP31]], align 4, !llvm.access.group !47
6280 // CHECK3-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[CLASS_ANON_5]], %class.anon.5* [[REF_TMP]], i32 0, i32 3
6281 // CHECK3-NEXT:    store double** [[TMP3]], double*** [[TMP32]], align 4, !llvm.access.group !47
6282 // CHECK3-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE4_clEv"(%class.anon.5* nonnull align 4 dereferenceable(16) [[REF_TMP]]), !llvm.access.group !47
6283 // CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
6284 // CHECK3:       omp.body.continue:
6285 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6286 // CHECK3:       omp.inner.for.inc:
6287 // CHECK3-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
6288 // CHECK3-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP33]], 1
6289 // CHECK3-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
6290 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP48:![0-9]+]]
6291 // CHECK3:       omp.inner.for.end:
6292 // CHECK3-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
6293 // CHECK3:       omp.dispatch.inc:
6294 // CHECK3-NEXT:    br label [[OMP_DISPATCH_COND]]
6295 // CHECK3:       omp.dispatch.end:
6296 // CHECK3-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
6297 // CHECK3-NEXT:    [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
6298 // CHECK3-NEXT:    br i1 [[TMP35]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6299 // CHECK3:       .omp.final.then:
6300 // CHECK3-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6301 // CHECK3-NEXT:    [[SUB9:%.*]] = sub nsw i32 [[TMP36]], 0
6302 // CHECK3-NEXT:    [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
6303 // CHECK3-NEXT:    [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
6304 // CHECK3-NEXT:    [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
6305 // CHECK3-NEXT:    store i32 [[ADD12]], i32* [[I3]], align 4
6306 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
6307 // CHECK3:       .omp.final.done:
6308 // CHECK3-NEXT:    br label [[OMP_PRECOND_END]]
6309 // CHECK3:       omp.precond.end:
6310 // CHECK3-NEXT:    ret void
6311 //
6312 //
6313 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l328
6314 // CHECK3-SAME: (i32 [[CH:%.*]], i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2]] {
6315 // CHECK3-NEXT:  entry:
6316 // CHECK3-NEXT:    [[CH_ADDR:%.*]] = alloca i32, align 4
6317 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
6318 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
6319 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
6320 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
6321 // CHECK3-NEXT:    store i32 [[CH]], i32* [[CH_ADDR]], align 4
6322 // CHECK3-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
6323 // CHECK3-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
6324 // CHECK3-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
6325 // CHECK3-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
6326 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
6327 // CHECK3-NEXT:    ret void
6328 //
6329 //
6330 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..22
6331 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
6332 // CHECK3-NEXT:  entry:
6333 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
6334 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
6335 // CHECK3-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 4
6336 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
6337 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
6338 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
6339 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
6340 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
6341 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6342 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
6343 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
6344 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
6345 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
6346 // CHECK3-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
6347 // CHECK3-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
6348 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6349 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6350 // CHECK3-NEXT:    [[I4:%.*]] = alloca i32, align 4
6351 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
6352 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
6353 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
6354 // CHECK3-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 4
6355 // CHECK3-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
6356 // CHECK3-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
6357 // CHECK3-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
6358 // CHECK3-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
6359 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
6360 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
6361 // CHECK3-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 4
6362 // CHECK3-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 4
6363 // CHECK3-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 4
6364 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
6365 // CHECK3-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
6366 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
6367 // CHECK3-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
6368 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6369 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
6370 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
6371 // CHECK3-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
6372 // CHECK3-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
6373 // CHECK3-NEXT:    store i32 0, i32* [[I]], align 4
6374 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6375 // CHECK3-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
6376 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
6377 // CHECK3:       omp.precond.then:
6378 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
6379 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
6380 // CHECK3-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
6381 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6382 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6383 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
6384 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
6385 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
6386 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6387 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
6388 // CHECK3-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
6389 // CHECK3-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
6390 // CHECK3:       cond.true:
6391 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
6392 // CHECK3-NEXT:    br label [[COND_END:%.*]]
6393 // CHECK3:       cond.false:
6394 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6395 // CHECK3-NEXT:    br label [[COND_END]]
6396 // CHECK3:       cond.end:
6397 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
6398 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
6399 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
6400 // CHECK3-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
6401 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
6402 // CHECK3:       omp.inner.for.cond:
6403 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
6404 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !50
6405 // CHECK3-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
6406 // CHECK3-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6407 // CHECK3:       omp.inner.for.body:
6408 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !50
6409 // CHECK3-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !50
6410 // CHECK3-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !50
6411 // CHECK3-NEXT:    store i32 [[TMP21]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !50
6412 // CHECK3-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !50
6413 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**, i32)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]], i32 [[TMP22]]), !llvm.access.group !50
6414 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6415 // CHECK3:       omp.inner.for.inc:
6416 // CHECK3-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
6417 // CHECK3-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !50
6418 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
6419 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
6420 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP51:![0-9]+]]
6421 // CHECK3:       omp.inner.for.end:
6422 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
6423 // CHECK3:       omp.loop.exit:
6424 // CHECK3-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
6425 // CHECK3-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
6426 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
6427 // CHECK3-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
6428 // CHECK3-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
6429 // CHECK3-NEXT:    br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6430 // CHECK3:       .omp.final.then:
6431 // CHECK3-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6432 // CHECK3-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP29]], 0
6433 // CHECK3-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
6434 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
6435 // CHECK3-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
6436 // CHECK3-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
6437 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
6438 // CHECK3:       .omp.final.done:
6439 // CHECK3-NEXT:    br label [[OMP_PRECOND_END]]
6440 // CHECK3:       omp.precond.end:
6441 // CHECK3-NEXT:    ret void
6442 //
6443 //
6444 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..23
6445 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
6446 // CHECK3-NEXT:  entry:
6447 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
6448 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
6449 // CHECK3-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
6450 // CHECK3-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
6451 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
6452 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
6453 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
6454 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
6455 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
6456 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6457 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
6458 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
6459 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
6460 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
6461 // CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
6462 // CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
6463 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6464 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6465 // CHECK3-NEXT:    [[I4:%.*]] = alloca i32, align 4
6466 // CHECK3-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_6:%.*]], align 4
6467 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
6468 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
6469 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
6470 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
6471 // CHECK3-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
6472 // CHECK3-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
6473 // CHECK3-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
6474 // CHECK3-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
6475 // CHECK3-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
6476 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
6477 // CHECK3-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
6478 // CHECK3-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
6479 // CHECK3-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
6480 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
6481 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
6482 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6483 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
6484 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
6485 // CHECK3-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
6486 // CHECK3-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
6487 // CHECK3-NEXT:    store i32 0, i32* [[I]], align 4
6488 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6489 // CHECK3-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
6490 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
6491 // CHECK3:       omp.precond.then:
6492 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
6493 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
6494 // CHECK3-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
6495 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
6496 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
6497 // CHECK3-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
6498 // CHECK3-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
6499 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6500 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6501 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
6502 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
6503 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6504 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
6505 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
6506 // CHECK3-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32 35, i32 [[TMP11]], i32 [[TMP12]], i32 1, i32 [[TMP10]])
6507 // CHECK3-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
6508 // CHECK3:       omp.dispatch.cond:
6509 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
6510 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4
6511 // CHECK3-NEXT:    [[TMP17:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP16]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
6512 // CHECK3-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP17]], 0
6513 // CHECK3-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
6514 // CHECK3:       omp.dispatch.body:
6515 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
6516 // CHECK3-NEXT:    store i32 [[TMP18]], i32* [[DOTOMP_IV]], align 4
6517 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
6518 // CHECK3:       omp.inner.for.cond:
6519 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
6520 // CHECK3-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !53
6521 // CHECK3-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
6522 // CHECK3-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6523 // CHECK3:       omp.inner.for.body:
6524 // CHECK3-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
6525 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP21]], 1
6526 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
6527 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !53
6528 // CHECK3-NEXT:    [[TMP22:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !53
6529 // CHECK3-NEXT:    [[TMP23:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !53
6530 // CHECK3-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP22]], i32 [[TMP23]]
6531 // CHECK3-NEXT:    [[TMP24:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !53
6532 // CHECK3-NEXT:    [[TMP25:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !53
6533 // CHECK3-NEXT:    [[TMP26:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !53
6534 // CHECK3-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP25]], i32 [[TMP26]]
6535 // CHECK3-NEXT:    [[TMP27:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !53
6536 // CHECK3-NEXT:    [[ADD7:%.*]] = fadd double [[TMP24]], [[TMP27]]
6537 // CHECK3-NEXT:    [[TMP28:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !53
6538 // CHECK3-NEXT:    [[TMP29:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !53
6539 // CHECK3-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP28]], i32 [[TMP29]]
6540 // CHECK3-NEXT:    store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !53
6541 // CHECK3-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_6]], %class.anon.6* [[REF_TMP]], i32 0, i32 0
6542 // CHECK3-NEXT:    store double** [[TMP1]], double*** [[TMP30]], align 4, !llvm.access.group !53
6543 // CHECK3-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_6]], %class.anon.6* [[REF_TMP]], i32 0, i32 1
6544 // CHECK3-NEXT:    store i32* [[I4]], i32** [[TMP31]], align 4, !llvm.access.group !53
6545 // CHECK3-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[CLASS_ANON_6]], %class.anon.6* [[REF_TMP]], i32 0, i32 2
6546 // CHECK3-NEXT:    store double** [[TMP2]], double*** [[TMP32]], align 4, !llvm.access.group !53
6547 // CHECK3-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [[CLASS_ANON_6]], %class.anon.6* [[REF_TMP]], i32 0, i32 3
6548 // CHECK3-NEXT:    store double** [[TMP3]], double*** [[TMP33]], align 4, !llvm.access.group !53
6549 // CHECK3-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE5_clEv"(%class.anon.6* nonnull align 4 dereferenceable(16) [[REF_TMP]]), !llvm.access.group !53
6550 // CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
6551 // CHECK3:       omp.body.continue:
6552 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6553 // CHECK3:       omp.inner.for.inc:
6554 // CHECK3-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
6555 // CHECK3-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP34]], 1
6556 // CHECK3-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
6557 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP54:![0-9]+]]
6558 // CHECK3:       omp.inner.for.end:
6559 // CHECK3-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
6560 // CHECK3:       omp.dispatch.inc:
6561 // CHECK3-NEXT:    br label [[OMP_DISPATCH_COND]]
6562 // CHECK3:       omp.dispatch.end:
6563 // CHECK3-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
6564 // CHECK3-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
6565 // CHECK3-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6566 // CHECK3:       .omp.final.then:
6567 // CHECK3-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6568 // CHECK3-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP37]], 0
6569 // CHECK3-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
6570 // CHECK3-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
6571 // CHECK3-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
6572 // CHECK3-NEXT:    store i32 [[ADD13]], i32* [[I4]], align 4
6573 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
6574 // CHECK3:       .omp.final.done:
6575 // CHECK3-NEXT:    br label [[OMP_PRECOND_END]]
6576 // CHECK3:       omp.precond.end:
6577 // CHECK3-NEXT:    ret void
6578 //
6579 //
6580 // CHECK3-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
6581 // CHECK3-SAME: () #[[ATTR4:[0-9]+]] {
6582 // CHECK3-NEXT:  entry:
6583 // CHECK3-NEXT:    call void @__tgt_register_requires(i64 1)
6584 // CHECK3-NEXT:    ret void
6585 //
6586 //
6587 // CHECK4-LABEL: define {{[^@]+}}@main
6588 // CHECK4-SAME: () #[[ATTR0:[0-9]+]] {
6589 // CHECK4-NEXT:  entry:
6590 // CHECK4-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
6591 // CHECK4-NEXT:    [[A:%.*]] = alloca double*, align 4
6592 // CHECK4-NEXT:    [[B:%.*]] = alloca double*, align 4
6593 // CHECK4-NEXT:    [[C:%.*]] = alloca double*, align 4
6594 // CHECK4-NEXT:    [[N:%.*]] = alloca i32, align 4
6595 // CHECK4-NEXT:    [[CH:%.*]] = alloca i32, align 4
6596 // CHECK4-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 4
6597 // CHECK4-NEXT:    store i32 0, i32* [[RETVAL]], align 4
6598 // CHECK4-NEXT:    store i32 10000, i32* [[N]], align 4
6599 // CHECK4-NEXT:    store i32 100, i32* [[CH]], align 4
6600 // CHECK4-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 0
6601 // CHECK4-NEXT:    store i32* [[N]], i32** [[TMP0]], align 4
6602 // CHECK4-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 1
6603 // CHECK4-NEXT:    store double** [[A]], double*** [[TMP1]], align 4
6604 // CHECK4-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 2
6605 // CHECK4-NEXT:    store double** [[B]], double*** [[TMP2]], align 4
6606 // CHECK4-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 3
6607 // CHECK4-NEXT:    store double** [[C]], double*** [[TMP3]], align 4
6608 // CHECK4-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 4
6609 // CHECK4-NEXT:    store i32* [[CH]], i32** [[TMP4]], align 4
6610 // CHECK4-NEXT:    call void @"_ZZ4mainENK3$_0clEv"(%class.anon* nonnull align 4 dereferenceable(20) [[REF_TMP]])
6611 // CHECK4-NEXT:    ret i32 0
6612 //
6613 //
6614 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l116
6615 // CHECK4-SAME: (i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2:[0-9]+]] {
6616 // CHECK4-NEXT:  entry:
6617 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
6618 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
6619 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
6620 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
6621 // CHECK4-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
6622 // CHECK4-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
6623 // CHECK4-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
6624 // CHECK4-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
6625 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
6626 // CHECK4-NEXT:    ret void
6627 //
6628 //
6629 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined.
6630 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
6631 // CHECK4-NEXT:  entry:
6632 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
6633 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
6634 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
6635 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
6636 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
6637 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
6638 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6639 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
6640 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
6641 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
6642 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
6643 // CHECK4-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
6644 // CHECK4-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
6645 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6646 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6647 // CHECK4-NEXT:    [[I3:%.*]] = alloca i32, align 4
6648 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
6649 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
6650 // CHECK4-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
6651 // CHECK4-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
6652 // CHECK4-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
6653 // CHECK4-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
6654 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
6655 // CHECK4-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
6656 // CHECK4-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
6657 // CHECK4-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
6658 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
6659 // CHECK4-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
6660 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6661 // CHECK4-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
6662 // CHECK4-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
6663 // CHECK4-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
6664 // CHECK4-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
6665 // CHECK4-NEXT:    store i32 0, i32* [[I]], align 4
6666 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6667 // CHECK4-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
6668 // CHECK4-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
6669 // CHECK4:       omp.precond.then:
6670 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
6671 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6672 // CHECK4-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
6673 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6674 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6675 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
6676 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
6677 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
6678 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6679 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6680 // CHECK4-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
6681 // CHECK4-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
6682 // CHECK4:       cond.true:
6683 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6684 // CHECK4-NEXT:    br label [[COND_END:%.*]]
6685 // CHECK4:       cond.false:
6686 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6687 // CHECK4-NEXT:    br label [[COND_END]]
6688 // CHECK4:       cond.end:
6689 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
6690 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
6691 // CHECK4-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
6692 // CHECK4-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
6693 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
6694 // CHECK4:       omp.inner.for.cond:
6695 // CHECK4-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
6696 // CHECK4-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !11
6697 // CHECK4-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
6698 // CHECK4-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6699 // CHECK4:       omp.inner.for.body:
6700 // CHECK4-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !11
6701 // CHECK4-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !11
6702 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !11
6703 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6704 // CHECK4:       omp.inner.for.inc:
6705 // CHECK4-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
6706 // CHECK4-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !11
6707 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
6708 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
6709 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]]
6710 // CHECK4:       omp.inner.for.end:
6711 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
6712 // CHECK4:       omp.loop.exit:
6713 // CHECK4-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
6714 // CHECK4-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
6715 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
6716 // CHECK4-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
6717 // CHECK4-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
6718 // CHECK4-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6719 // CHECK4:       .omp.final.then:
6720 // CHECK4-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6721 // CHECK4-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
6722 // CHECK4-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
6723 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
6724 // CHECK4-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
6725 // CHECK4-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
6726 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
6727 // CHECK4:       .omp.final.done:
6728 // CHECK4-NEXT:    br label [[OMP_PRECOND_END]]
6729 // CHECK4:       omp.precond.end:
6730 // CHECK4-NEXT:    ret void
6731 //
6732 //
6733 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..1
6734 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
6735 // CHECK4-NEXT:  entry:
6736 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
6737 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
6738 // CHECK4-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
6739 // CHECK4-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
6740 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
6741 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
6742 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
6743 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
6744 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6745 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
6746 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
6747 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
6748 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
6749 // CHECK4-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
6750 // CHECK4-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
6751 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6752 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6753 // CHECK4-NEXT:    [[I3:%.*]] = alloca i32, align 4
6754 // CHECK4-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 4
6755 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
6756 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
6757 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
6758 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
6759 // CHECK4-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
6760 // CHECK4-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
6761 // CHECK4-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
6762 // CHECK4-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
6763 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
6764 // CHECK4-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
6765 // CHECK4-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
6766 // CHECK4-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
6767 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
6768 // CHECK4-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
6769 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6770 // CHECK4-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
6771 // CHECK4-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
6772 // CHECK4-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
6773 // CHECK4-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
6774 // CHECK4-NEXT:    store i32 0, i32* [[I]], align 4
6775 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6776 // CHECK4-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
6777 // CHECK4-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
6778 // CHECK4:       omp.precond.then:
6779 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
6780 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6781 // CHECK4-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
6782 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
6783 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
6784 // CHECK4-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
6785 // CHECK4-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
6786 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6787 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6788 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
6789 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
6790 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
6791 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6792 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6793 // CHECK4-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
6794 // CHECK4-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
6795 // CHECK4:       cond.true:
6796 // CHECK4-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6797 // CHECK4-NEXT:    br label [[COND_END:%.*]]
6798 // CHECK4:       cond.false:
6799 // CHECK4-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6800 // CHECK4-NEXT:    br label [[COND_END]]
6801 // CHECK4:       cond.end:
6802 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
6803 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
6804 // CHECK4-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
6805 // CHECK4-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
6806 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
6807 // CHECK4:       omp.inner.for.cond:
6808 // CHECK4-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15
6809 // CHECK4-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !15
6810 // CHECK4-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
6811 // CHECK4-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6812 // CHECK4:       omp.inner.for.body:
6813 // CHECK4-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15
6814 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
6815 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
6816 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !15
6817 // CHECK4-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !15
6818 // CHECK4-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !15
6819 // CHECK4-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i32 [[TMP21]]
6820 // CHECK4-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !15
6821 // CHECK4-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !15
6822 // CHECK4-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !15
6823 // CHECK4-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
6824 // CHECK4-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !15
6825 // CHECK4-NEXT:    [[ADD7:%.*]] = fadd double [[TMP22]], [[TMP25]]
6826 // CHECK4-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !15
6827 // CHECK4-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !15
6828 // CHECK4-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
6829 // CHECK4-NEXT:    store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !15
6830 // CHECK4-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0
6831 // CHECK4-NEXT:    store double** [[TMP1]], double*** [[TMP28]], align 4, !llvm.access.group !15
6832 // CHECK4-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 1
6833 // CHECK4-NEXT:    store i32* [[I3]], i32** [[TMP29]], align 4, !llvm.access.group !15
6834 // CHECK4-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 2
6835 // CHECK4-NEXT:    store double** [[TMP2]], double*** [[TMP30]], align 4, !llvm.access.group !15
6836 // CHECK4-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 3
6837 // CHECK4-NEXT:    store double** [[TMP3]], double*** [[TMP31]], align 4, !llvm.access.group !15
6838 // CHECK4-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(%class.anon.0* nonnull align 4 dereferenceable(16) [[REF_TMP]]), !llvm.access.group !15
6839 // CHECK4-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
6840 // CHECK4:       omp.body.continue:
6841 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6842 // CHECK4:       omp.inner.for.inc:
6843 // CHECK4-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15
6844 // CHECK4-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP32]], 1
6845 // CHECK4-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15
6846 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]]
6847 // CHECK4:       omp.inner.for.end:
6848 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
6849 // CHECK4:       omp.loop.exit:
6850 // CHECK4-NEXT:    [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
6851 // CHECK4-NEXT:    [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
6852 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
6853 // CHECK4-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
6854 // CHECK4-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
6855 // CHECK4-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6856 // CHECK4:       .omp.final.then:
6857 // CHECK4-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6858 // CHECK4-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP37]], 0
6859 // CHECK4-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
6860 // CHECK4-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
6861 // CHECK4-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
6862 // CHECK4-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
6863 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
6864 // CHECK4:       .omp.final.done:
6865 // CHECK4-NEXT:    br label [[OMP_PRECOND_END]]
6866 // CHECK4:       omp.precond.end:
6867 // CHECK4-NEXT:    ret void
6868 //
6869 //
6870 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159
6871 // CHECK4-SAME: (i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2]] {
6872 // CHECK4-NEXT:  entry:
6873 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
6874 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
6875 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
6876 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
6877 // CHECK4-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
6878 // CHECK4-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
6879 // CHECK4-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
6880 // CHECK4-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
6881 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
6882 // CHECK4-NEXT:    ret void
6883 //
6884 //
6885 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..2
6886 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
6887 // CHECK4-NEXT:  entry:
6888 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
6889 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
6890 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
6891 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
6892 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
6893 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
6894 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6895 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
6896 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
6897 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
6898 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
6899 // CHECK4-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
6900 // CHECK4-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
6901 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6902 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6903 // CHECK4-NEXT:    [[I3:%.*]] = alloca i32, align 4
6904 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
6905 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
6906 // CHECK4-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
6907 // CHECK4-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
6908 // CHECK4-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
6909 // CHECK4-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
6910 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
6911 // CHECK4-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
6912 // CHECK4-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
6913 // CHECK4-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
6914 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
6915 // CHECK4-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
6916 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6917 // CHECK4-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
6918 // CHECK4-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
6919 // CHECK4-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
6920 // CHECK4-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
6921 // CHECK4-NEXT:    store i32 0, i32* [[I]], align 4
6922 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6923 // CHECK4-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
6924 // CHECK4-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
6925 // CHECK4:       omp.precond.then:
6926 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
6927 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6928 // CHECK4-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
6929 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6930 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6931 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
6932 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
6933 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
6934 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6935 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6936 // CHECK4-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
6937 // CHECK4-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
6938 // CHECK4:       cond.true:
6939 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6940 // CHECK4-NEXT:    br label [[COND_END:%.*]]
6941 // CHECK4:       cond.false:
6942 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6943 // CHECK4-NEXT:    br label [[COND_END]]
6944 // CHECK4:       cond.end:
6945 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
6946 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
6947 // CHECK4-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
6948 // CHECK4-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
6949 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
6950 // CHECK4:       omp.inner.for.cond:
6951 // CHECK4-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
6952 // CHECK4-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !20
6953 // CHECK4-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
6954 // CHECK4-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6955 // CHECK4:       omp.inner.for.body:
6956 // CHECK4-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !20
6957 // CHECK4-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !20
6958 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !20
6959 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6960 // CHECK4:       omp.inner.for.inc:
6961 // CHECK4-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
6962 // CHECK4-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !20
6963 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
6964 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
6965 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]]
6966 // CHECK4:       omp.inner.for.end:
6967 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
6968 // CHECK4:       omp.loop.exit:
6969 // CHECK4-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
6970 // CHECK4-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
6971 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
6972 // CHECK4-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
6973 // CHECK4-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
6974 // CHECK4-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6975 // CHECK4:       .omp.final.then:
6976 // CHECK4-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6977 // CHECK4-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
6978 // CHECK4-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
6979 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
6980 // CHECK4-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
6981 // CHECK4-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
6982 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
6983 // CHECK4:       .omp.final.done:
6984 // CHECK4-NEXT:    br label [[OMP_PRECOND_END]]
6985 // CHECK4:       omp.precond.end:
6986 // CHECK4-NEXT:    ret void
6987 //
6988 //
6989 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..3
6990 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
6991 // CHECK4-NEXT:  entry:
6992 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
6993 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
6994 // CHECK4-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
6995 // CHECK4-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
6996 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
6997 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
6998 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
6999 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
7000 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
7001 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
7002 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
7003 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
7004 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
7005 // CHECK4-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
7006 // CHECK4-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
7007 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7008 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7009 // CHECK4-NEXT:    [[I3:%.*]] = alloca i32, align 4
7010 // CHECK4-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_1:%.*]], align 4
7011 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
7012 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
7013 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
7014 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
7015 // CHECK4-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
7016 // CHECK4-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
7017 // CHECK4-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
7018 // CHECK4-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
7019 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
7020 // CHECK4-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
7021 // CHECK4-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
7022 // CHECK4-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
7023 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
7024 // CHECK4-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
7025 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7026 // CHECK4-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
7027 // CHECK4-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
7028 // CHECK4-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
7029 // CHECK4-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
7030 // CHECK4-NEXT:    store i32 0, i32* [[I]], align 4
7031 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7032 // CHECK4-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
7033 // CHECK4-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
7034 // CHECK4:       omp.precond.then:
7035 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
7036 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7037 // CHECK4-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
7038 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
7039 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
7040 // CHECK4-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
7041 // CHECK4-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
7042 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
7043 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
7044 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
7045 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
7046 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
7047 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
7048 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7049 // CHECK4-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
7050 // CHECK4-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
7051 // CHECK4:       cond.true:
7052 // CHECK4-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7053 // CHECK4-NEXT:    br label [[COND_END:%.*]]
7054 // CHECK4:       cond.false:
7055 // CHECK4-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
7056 // CHECK4-NEXT:    br label [[COND_END]]
7057 // CHECK4:       cond.end:
7058 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
7059 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
7060 // CHECK4-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
7061 // CHECK4-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
7062 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
7063 // CHECK4:       omp.inner.for.cond:
7064 // CHECK4-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
7065 // CHECK4-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !23
7066 // CHECK4-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
7067 // CHECK4-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7068 // CHECK4:       omp.inner.for.body:
7069 // CHECK4-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
7070 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
7071 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
7072 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !23
7073 // CHECK4-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !23
7074 // CHECK4-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !23
7075 // CHECK4-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i32 [[TMP21]]
7076 // CHECK4-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !23
7077 // CHECK4-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !23
7078 // CHECK4-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !23
7079 // CHECK4-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
7080 // CHECK4-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !23
7081 // CHECK4-NEXT:    [[ADD7:%.*]] = fadd double [[TMP22]], [[TMP25]]
7082 // CHECK4-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !23
7083 // CHECK4-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !23
7084 // CHECK4-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
7085 // CHECK4-NEXT:    store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !23
7086 // CHECK4-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 0
7087 // CHECK4-NEXT:    store double** [[TMP1]], double*** [[TMP28]], align 4, !llvm.access.group !23
7088 // CHECK4-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 1
7089 // CHECK4-NEXT:    store i32* [[I3]], i32** [[TMP29]], align 4, !llvm.access.group !23
7090 // CHECK4-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 2
7091 // CHECK4-NEXT:    store double** [[TMP2]], double*** [[TMP30]], align 4, !llvm.access.group !23
7092 // CHECK4-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 3
7093 // CHECK4-NEXT:    store double** [[TMP3]], double*** [[TMP31]], align 4, !llvm.access.group !23
7094 // CHECK4-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE0_clEv"(%class.anon.1* nonnull align 4 dereferenceable(16) [[REF_TMP]]), !llvm.access.group !23
7095 // CHECK4-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
7096 // CHECK4:       omp.body.continue:
7097 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
7098 // CHECK4:       omp.inner.for.inc:
7099 // CHECK4-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
7100 // CHECK4-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP32]], 1
7101 // CHECK4-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
7102 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]]
7103 // CHECK4:       omp.inner.for.end:
7104 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
7105 // CHECK4:       omp.loop.exit:
7106 // CHECK4-NEXT:    [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
7107 // CHECK4-NEXT:    [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
7108 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
7109 // CHECK4-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
7110 // CHECK4-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
7111 // CHECK4-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7112 // CHECK4:       .omp.final.then:
7113 // CHECK4-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7114 // CHECK4-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP37]], 0
7115 // CHECK4-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
7116 // CHECK4-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
7117 // CHECK4-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
7118 // CHECK4-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
7119 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
7120 // CHECK4:       .omp.final.done:
7121 // CHECK4-NEXT:    br label [[OMP_PRECOND_END]]
7122 // CHECK4:       omp.precond.end:
7123 // CHECK4-NEXT:    ret void
7124 //
7125 //
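// The checks below cover the i386 host stub for the target region at main:201.
// The stub spills ch, n, a, b and c into locals and forwards their addresses to
// __kmpc_fork_teams, which launches the teams-level outlined body .omp_outlined..6.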
7126 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l201
7127 // CHECK4-SAME: (i32 [[CH:%.*]], i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2]] {
7128 // CHECK4-NEXT:  entry:
7129 // CHECK4-NEXT:    [[CH_ADDR:%.*]] = alloca i32, align 4
7130 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
7131 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
7132 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
7133 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
7134 // CHECK4-NEXT:    store i32 [[CH]], i32* [[CH_ADDR]], align 4
7135 // CHECK4-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
7136 // CHECK4-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
7137 // CHECK4-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
7138 // CHECK4-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
7139 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
7140 // CHECK4-NEXT:    ret void
7141 //
7142 //
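// .omp_outlined..6 is the teams/distribute body for the l201 region. It recomputes
// the trip count from n, initializes the distribute loop with __kmpc_for_static_init_4
// using schedule id 91 (the chunked distribute schedule) and the ch value as the chunk,
// and forks .omp_outlined..7 once per chunk with the current lower/upper bounds plus
// n, a, b and c. Because the distribute schedule is chunked, the combined loop bumps
// .omp.comb.lb/.omp.comb.ub by the stride and re-clamps the upper bound against the
// trip count on every round.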
7143 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..6
7144 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
7145 // CHECK4-NEXT:  entry:
7146 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
7147 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
7148 // CHECK4-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 4
7149 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
7150 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
7151 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
7152 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
7153 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
7154 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
7155 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
7156 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
7157 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
7158 // CHECK4-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
7159 // CHECK4-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
7160 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7161 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7162 // CHECK4-NEXT:    [[I3:%.*]] = alloca i32, align 4
7163 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
7164 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
7165 // CHECK4-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 4
7166 // CHECK4-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
7167 // CHECK4-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
7168 // CHECK4-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
7169 // CHECK4-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
7170 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
7171 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
7172 // CHECK4-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 4
7173 // CHECK4-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 4
7174 // CHECK4-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 4
7175 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP1]], align 4
7176 // CHECK4-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
7177 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7178 // CHECK4-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP6]], 0
7179 // CHECK4-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
7180 // CHECK4-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
7181 // CHECK4-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
7182 // CHECK4-NEXT:    store i32 0, i32* [[I]], align 4
7183 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7184 // CHECK4-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP7]]
7185 // CHECK4-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
7186 // CHECK4:       omp.precond.then:
7187 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
7188 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7189 // CHECK4-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_COMB_UB]], align 4
7190 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
7191 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
7192 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP0]], align 4
7193 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
7194 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
7195 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP9]])
7196 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
7197 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7198 // CHECK4-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
7199 // CHECK4-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
7200 // CHECK4:       cond.true:
7201 // CHECK4-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7202 // CHECK4-NEXT:    br label [[COND_END:%.*]]
7203 // CHECK4:       cond.false:
7204 // CHECK4-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
7205 // CHECK4-NEXT:    br label [[COND_END]]
7206 // CHECK4:       cond.end:
7207 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
7208 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
7209 // CHECK4-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
7210 // CHECK4-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
7211 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
7212 // CHECK4:       omp.inner.for.cond:
7213 // CHECK4-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
7214 // CHECK4-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !26
7215 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], 1
7216 // CHECK4-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP17]], [[ADD]]
7217 // CHECK4-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7218 // CHECK4:       omp.inner.for.body:
7219 // CHECK4-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !26
7220 // CHECK4-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26
7221 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]]), !llvm.access.group !26
7222 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
7223 // CHECK4:       omp.inner.for.inc:
7224 // CHECK4-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
7225 // CHECK4-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !26
7226 // CHECK4-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
7227 // CHECK4-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
7228 // CHECK4-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !26
7229 // CHECK4-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !26
7230 // CHECK4-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
7231 // CHECK4-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !26
7232 // CHECK4-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26
7233 // CHECK4-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !26
7234 // CHECK4-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
7235 // CHECK4-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26
7236 // CHECK4-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26
7237 // CHECK4-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !26
7238 // CHECK4-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[TMP27]], [[TMP28]]
7239 // CHECK4-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
7240 // CHECK4:       cond.true10:
7241 // CHECK4-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !26
7242 // CHECK4-NEXT:    br label [[COND_END12:%.*]]
7243 // CHECK4:       cond.false11:
7244 // CHECK4-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26
7245 // CHECK4-NEXT:    br label [[COND_END12]]
7246 // CHECK4:       cond.end12:
7247 // CHECK4-NEXT:    [[COND13:%.*]] = phi i32 [ [[TMP29]], [[COND_TRUE10]] ], [ [[TMP30]], [[COND_FALSE11]] ]
7248 // CHECK4-NEXT:    store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26
7249 // CHECK4-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !26
7250 // CHECK4-NEXT:    store i32 [[TMP31]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
7251 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]]
7252 // CHECK4:       omp.inner.for.end:
7253 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
7254 // CHECK4:       omp.loop.exit:
7255 // CHECK4-NEXT:    [[TMP32:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
7256 // CHECK4-NEXT:    [[TMP33:%.*]] = load i32, i32* [[TMP32]], align 4
7257 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP33]])
7258 // CHECK4-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
7259 // CHECK4-NEXT:    [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
7260 // CHECK4-NEXT:    br i1 [[TMP35]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7261 // CHECK4:       .omp.final.then:
7262 // CHECK4-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7263 // CHECK4-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP36]], 0
7264 // CHECK4-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
7265 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV15]], 1
7266 // CHECK4-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL]]
7267 // CHECK4-NEXT:    store i32 [[ADD16]], i32* [[I3]], align 4
7268 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
7269 // CHECK4:       .omp.final.done:
7270 // CHECK4-NEXT:    br label [[OMP_PRECOND_END]]
7271 // CHECK4:       omp.precond.end:
7272 // CHECK4-NEXT:    ret void
7273 //
7274 //
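// .omp_outlined..7 is the innermost parallel worksharing body for the l201 region.
// It takes the distribute bounds handed down by .omp_outlined..6 as
// .previous.lb./.previous.ub., installs them as its own static-loop bounds
// (schedule id 34, chunk 1), and in each iteration stores a[i] = b[i] + c[i] and
// then builds and calls the inner lambda over a, i, b and c; every access in the
// body carries the same !llvm.access.group metadata so the simd loop metadata
// applies to it.
//
// For orientation only, a minimal hypothetical sketch of the kind of source loop
// that produces this shape (the real code sits near line 201 of this file, outside
// this block of checks; the exact pragma form and the names below are assumptions):
//
//   void vadd(int n, int ch, double *a, double *b, double *c) {
//     #pragma omp target teams distribute parallel for simd dist_schedule(static, ch)
//     for (int i = 0; i < n; ++i) {
//       a[i] = b[i] + c[i];
//       [&]() { a[i] = b[i] + c[i]; }(); // captured by reference as a, i, b, c
//     }
//   }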
7275 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..7
7276 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
7277 // CHECK4-NEXT:  entry:
7278 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
7279 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
7280 // CHECK4-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
7281 // CHECK4-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
7282 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
7283 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
7284 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
7285 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
7286 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
7287 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
7288 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
7289 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
7290 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
7291 // CHECK4-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
7292 // CHECK4-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
7293 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7294 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7295 // CHECK4-NEXT:    [[I3:%.*]] = alloca i32, align 4
7296 // CHECK4-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_2:%.*]], align 4
7297 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
7298 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
7299 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
7300 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
7301 // CHECK4-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
7302 // CHECK4-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
7303 // CHECK4-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
7304 // CHECK4-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
7305 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
7306 // CHECK4-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
7307 // CHECK4-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
7308 // CHECK4-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
7309 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
7310 // CHECK4-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
7311 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7312 // CHECK4-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
7313 // CHECK4-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
7314 // CHECK4-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
7315 // CHECK4-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
7316 // CHECK4-NEXT:    store i32 0, i32* [[I]], align 4
7317 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7318 // CHECK4-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
7319 // CHECK4-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
7320 // CHECK4:       omp.precond.then:
7321 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
7322 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7323 // CHECK4-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
7324 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
7325 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
7326 // CHECK4-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
7327 // CHECK4-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
7328 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
7329 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
7330 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
7331 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
7332 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
7333 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
7334 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7335 // CHECK4-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
7336 // CHECK4-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
7337 // CHECK4:       cond.true:
7338 // CHECK4-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7339 // CHECK4-NEXT:    br label [[COND_END:%.*]]
7340 // CHECK4:       cond.false:
7341 // CHECK4-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
7342 // CHECK4-NEXT:    br label [[COND_END]]
7343 // CHECK4:       cond.end:
7344 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
7345 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
7346 // CHECK4-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
7347 // CHECK4-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
7348 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
7349 // CHECK4:       omp.inner.for.cond:
7350 // CHECK4-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
7351 // CHECK4-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !29
7352 // CHECK4-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
7353 // CHECK4-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7354 // CHECK4:       omp.inner.for.body:
7355 // CHECK4-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
7356 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
7357 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
7358 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !29
7359 // CHECK4-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !29
7360 // CHECK4-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !29
7361 // CHECK4-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i32 [[TMP21]]
7362 // CHECK4-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !29
7363 // CHECK4-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !29
7364 // CHECK4-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !29
7365 // CHECK4-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
7366 // CHECK4-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !29
7367 // CHECK4-NEXT:    [[ADD7:%.*]] = fadd double [[TMP22]], [[TMP25]]
7368 // CHECK4-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !29
7369 // CHECK4-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !29
7370 // CHECK4-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
7371 // CHECK4-NEXT:    store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !29
7372 // CHECK4-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[CLASS_ANON_2]], %class.anon.2* [[REF_TMP]], i32 0, i32 0
7373 // CHECK4-NEXT:    store double** [[TMP1]], double*** [[TMP28]], align 4, !llvm.access.group !29
7374 // CHECK4-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_2]], %class.anon.2* [[REF_TMP]], i32 0, i32 1
7375 // CHECK4-NEXT:    store i32* [[I3]], i32** [[TMP29]], align 4, !llvm.access.group !29
7376 // CHECK4-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_2]], %class.anon.2* [[REF_TMP]], i32 0, i32 2
7377 // CHECK4-NEXT:    store double** [[TMP2]], double*** [[TMP30]], align 4, !llvm.access.group !29
7378 // CHECK4-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_2]], %class.anon.2* [[REF_TMP]], i32 0, i32 3
7379 // CHECK4-NEXT:    store double** [[TMP3]], double*** [[TMP31]], align 4, !llvm.access.group !29
7380 // CHECK4-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE1_clEv"(%class.anon.2* nonnull align 4 dereferenceable(16) [[REF_TMP]]), !llvm.access.group !29
7381 // CHECK4-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
7382 // CHECK4:       omp.body.continue:
7383 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
7384 // CHECK4:       omp.inner.for.inc:
7385 // CHECK4-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
7386 // CHECK4-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP32]], 1
7387 // CHECK4-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
7388 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]]
7389 // CHECK4:       omp.inner.for.end:
7390 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
7391 // CHECK4:       omp.loop.exit:
7392 // CHECK4-NEXT:    [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
7393 // CHECK4-NEXT:    [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
7394 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
7395 // CHECK4-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
7396 // CHECK4-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
7397 // CHECK4-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7398 // CHECK4:       .omp.final.then:
7399 // CHECK4-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7400 // CHECK4-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP37]], 0
7401 // CHECK4-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
7402 // CHECK4-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
7403 // CHECK4-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
7404 // CHECK4-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
7405 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
7406 // CHECK4:       .omp.final.done:
7407 // CHECK4-NEXT:    br label [[OMP_PRECOND_END]]
7408 // CHECK4:       omp.precond.end:
7409 // CHECK4-NEXT:    ret void
7410 //
7411 //
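// Host stub for the target region at main:234. This variant has no chunk argument:
// only n, a, b and c are captured, and __kmpc_fork_teams launches .omp_outlined..10
// with four captured arguments instead of five.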
7412 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l234
7413 // CHECK4-SAME: (i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2]] {
7414 // CHECK4-NEXT:  entry:
7415 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
7416 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
7417 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
7418 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
7419 // CHECK4-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
7420 // CHECK4-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
7421 // CHECK4-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
7422 // CHECK4-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
7423 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
7424 // CHECK4-NEXT:    ret void
7425 //
7426 //
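// .omp_outlined..10 is the teams/distribute body for the l234 region. With no
// dist_schedule chunk, the distribute loop is initialized with schedule id 92 and
// chunk 1, so the bounds never need re-clamping inside the loop; each iteration
// simply forks .omp_outlined..11 with the current bounds and the shared n, a, b, c.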
7427 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..10
7428 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
7429 // CHECK4-NEXT:  entry:
7430 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
7431 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
7432 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
7433 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
7434 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
7435 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
7436 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
7437 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
7438 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
7439 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
7440 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
7441 // CHECK4-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
7442 // CHECK4-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
7443 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7444 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7445 // CHECK4-NEXT:    [[I3:%.*]] = alloca i32, align 4
7446 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
7447 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
7448 // CHECK4-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
7449 // CHECK4-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
7450 // CHECK4-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
7451 // CHECK4-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
7452 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
7453 // CHECK4-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
7454 // CHECK4-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
7455 // CHECK4-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
7456 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
7457 // CHECK4-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
7458 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7459 // CHECK4-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
7460 // CHECK4-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
7461 // CHECK4-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
7462 // CHECK4-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
7463 // CHECK4-NEXT:    store i32 0, i32* [[I]], align 4
7464 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7465 // CHECK4-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
7466 // CHECK4-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
7467 // CHECK4:       omp.precond.then:
7468 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
7469 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7470 // CHECK4-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
7471 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
7472 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
7473 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
7474 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
7475 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
7476 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
7477 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7478 // CHECK4-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
7479 // CHECK4-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
7480 // CHECK4:       cond.true:
7481 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7482 // CHECK4-NEXT:    br label [[COND_END:%.*]]
7483 // CHECK4:       cond.false:
7484 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
7485 // CHECK4-NEXT:    br label [[COND_END]]
7486 // CHECK4:       cond.end:
7487 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
7488 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
7489 // CHECK4-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
7490 // CHECK4-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
7491 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
7492 // CHECK4:       omp.inner.for.cond:
7493 // CHECK4-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
7494 // CHECK4-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32
7495 // CHECK4-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
7496 // CHECK4-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7497 // CHECK4:       omp.inner.for.body:
7498 // CHECK4-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !32
7499 // CHECK4-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32
7500 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !32
7501 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
7502 // CHECK4:       omp.inner.for.inc:
7503 // CHECK4-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
7504 // CHECK4-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !32
7505 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
7506 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
7507 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]]
7508 // CHECK4:       omp.inner.for.end:
7509 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
7510 // CHECK4:       omp.loop.exit:
7511 // CHECK4-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
7512 // CHECK4-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
7513 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
7514 // CHECK4-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
7515 // CHECK4-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
7516 // CHECK4-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7517 // CHECK4:       .omp.final.then:
7518 // CHECK4-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7519 // CHECK4-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
7520 // CHECK4-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
7521 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
7522 // CHECK4-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
7523 // CHECK4-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
7524 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
7525 // CHECK4:       .omp.final.done:
7526 // CHECK4-NEXT:    br label [[OMP_PRECOND_END]]
7527 // CHECK4:       omp.precond.end:
7528 // CHECK4-NEXT:    ret void
7529 //
7530 //
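// .omp_outlined..11 is the parallel worksharing body for the l234 region. It mirrors
// .omp_outlined..7: static init with schedule id 34, a[i] = b[i] + c[i] in the body,
// and a call to the corresponding inner lambda in main, again with all accesses tied
// to a single !llvm.access.group.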
7531 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..11
7532 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
7533 // CHECK4-NEXT:  entry:
7534 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
7535 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
7536 // CHECK4-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
7537 // CHECK4-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
7538 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
7539 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
7540 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
7541 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
7542 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
7543 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
7544 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
7545 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
7546 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
7547 // CHECK4-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
7548 // CHECK4-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
7549 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7550 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7551 // CHECK4-NEXT:    [[I3:%.*]] = alloca i32, align 4
7552 // CHECK4-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_3:%.*]], align 4
7553 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
7554 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
7555 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
7556 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
7557 // CHECK4-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
7558 // CHECK4-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
7559 // CHECK4-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
7560 // CHECK4-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
7561 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
7562 // CHECK4-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
7563 // CHECK4-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
7564 // CHECK4-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
7565 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
7566 // CHECK4-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
7567 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7568 // CHECK4-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
7569 // CHECK4-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
7570 // CHECK4-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
7571 // CHECK4-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
7572 // CHECK4-NEXT:    store i32 0, i32* [[I]], align 4
7573 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7574 // CHECK4-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
7575 // CHECK4-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
7576 // CHECK4:       omp.precond.then:
7577 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
7578 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7579 // CHECK4-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
7580 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
7581 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
7582 // CHECK4-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
7583 // CHECK4-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
7584 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
7585 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
7586 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
7587 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
7588 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
7589 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
7590 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7591 // CHECK4-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
7592 // CHECK4-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
7593 // CHECK4:       cond.true:
7594 // CHECK4-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7595 // CHECK4-NEXT:    br label [[COND_END:%.*]]
7596 // CHECK4:       cond.false:
7597 // CHECK4-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
7598 // CHECK4-NEXT:    br label [[COND_END]]
7599 // CHECK4:       cond.end:
7600 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
7601 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
7602 // CHECK4-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
7603 // CHECK4-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
7604 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
7605 // CHECK4:       omp.inner.for.cond:
7606 // CHECK4-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
7607 // CHECK4-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !35
7608 // CHECK4-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
7609 // CHECK4-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7610 // CHECK4:       omp.inner.for.body:
7611 // CHECK4-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
7612 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
7613 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
7614 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !35
7615 // CHECK4-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !35
7616 // CHECK4-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !35
7617 // CHECK4-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i32 [[TMP21]]
7618 // CHECK4-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !35
7619 // CHECK4-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !35
7620 // CHECK4-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !35
7621 // CHECK4-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
7622 // CHECK4-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !35
7623 // CHECK4-NEXT:    [[ADD7:%.*]] = fadd double [[TMP22]], [[TMP25]]
7624 // CHECK4-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !35
7625 // CHECK4-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !35
7626 // CHECK4-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
7627 // CHECK4-NEXT:    store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !35
7628 // CHECK4-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[CLASS_ANON_3]], %class.anon.3* [[REF_TMP]], i32 0, i32 0
7629 // CHECK4-NEXT:    store double** [[TMP1]], double*** [[TMP28]], align 4, !llvm.access.group !35
7630 // CHECK4-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_3]], %class.anon.3* [[REF_TMP]], i32 0, i32 1
7631 // CHECK4-NEXT:    store i32* [[I3]], i32** [[TMP29]], align 4, !llvm.access.group !35
7632 // CHECK4-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_3]], %class.anon.3* [[REF_TMP]], i32 0, i32 2
7633 // CHECK4-NEXT:    store double** [[TMP2]], double*** [[TMP30]], align 4, !llvm.access.group !35
7634 // CHECK4-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_3]], %class.anon.3* [[REF_TMP]], i32 0, i32 3
7635 // CHECK4-NEXT:    store double** [[TMP3]], double*** [[TMP31]], align 4, !llvm.access.group !35
7636 // CHECK4-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE2_clEv"(%class.anon.3* nonnull align 4 dereferenceable(16) [[REF_TMP]]), !llvm.access.group !35
7637 // CHECK4-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
7638 // CHECK4:       omp.body.continue:
7639 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
7640 // CHECK4:       omp.inner.for.inc:
7641 // CHECK4-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
7642 // CHECK4-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP32]], 1
7643 // CHECK4-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
7644 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]]
7645 // CHECK4:       omp.inner.for.end:
7646 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
7647 // CHECK4:       omp.loop.exit:
7648 // CHECK4-NEXT:    [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
7649 // CHECK4-NEXT:    [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
7650 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
7651 // CHECK4-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
7652 // CHECK4-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
7653 // CHECK4-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7654 // CHECK4:       .omp.final.then:
7655 // CHECK4-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7656 // CHECK4-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP37]], 0
7657 // CHECK4-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
7658 // CHECK4-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
7659 // CHECK4-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
7660 // CHECK4-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
7661 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
7662 // CHECK4:       .omp.final.done:
7663 // CHECK4-NEXT:    br label [[OMP_PRECOND_END]]
7664 // CHECK4:       omp.precond.end:
7665 // CHECK4-NEXT:    ret void
7666 //
7667 //
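// Host stub for the target region at main:266. Like the l201 stub it captures ch
// together with n, a, b and c, but the teams entry point here is .omp_outlined..14.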
7668 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l266
7669 // CHECK4-SAME: (i32 [[CH:%.*]], i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2]] {
7670 // CHECK4-NEXT:  entry:
7671 // CHECK4-NEXT:    [[CH_ADDR:%.*]] = alloca i32, align 4
7672 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
7673 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
7674 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
7675 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
7676 // CHECK4-NEXT:    store i32 [[CH]], i32* [[CH_ADDR]], align 4
7677 // CHECK4-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
7678 // CHECK4-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
7679 // CHECK4-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
7680 // CHECK4-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
7681 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
7682 // CHECK4-NEXT:    ret void
7683 //
7684 //
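// .omp_outlined..14 is the teams/distribute body for the l266 region. Unlike
// .omp_outlined..6 it does not feed ch to the distribute schedule (schedule id 92,
// chunk 1); instead it snapshots ch into .capture_expr. and passes that value through
// .capture_expr..casted as an extra i32 argument to .omp_outlined..15, giving the
// inner parallel loop access to it.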
7685 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..14
7686 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
7687 // CHECK4-NEXT:  entry:
7688 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
7689 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
7690 // CHECK4-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 4
7691 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
7692 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
7693 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
7694 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
7695 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
7696 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
7697 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
7698 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
7699 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
7700 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
7701 // CHECK4-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
7702 // CHECK4-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
7703 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7704 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7705 // CHECK4-NEXT:    [[I4:%.*]] = alloca i32, align 4
7706 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
7707 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
7708 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
7709 // CHECK4-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 4
7710 // CHECK4-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
7711 // CHECK4-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
7712 // CHECK4-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
7713 // CHECK4-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
7714 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
7715 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
7716 // CHECK4-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 4
7717 // CHECK4-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 4
7718 // CHECK4-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 4
7719 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
7720 // CHECK4-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
7721 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
7722 // CHECK4-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
7723 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7724 // CHECK4-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
7725 // CHECK4-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
7726 // CHECK4-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
7727 // CHECK4-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
7728 // CHECK4-NEXT:    store i32 0, i32* [[I]], align 4
7729 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7730 // CHECK4-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
7731 // CHECK4-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
7732 // CHECK4:       omp.precond.then:
7733 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
7734 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
7735 // CHECK4-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
7736 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
7737 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
7738 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
7739 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
7740 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
7741 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
7742 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
7743 // CHECK4-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
7744 // CHECK4-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
7745 // CHECK4:       cond.true:
7746 // CHECK4-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
7747 // CHECK4-NEXT:    br label [[COND_END:%.*]]
7748 // CHECK4:       cond.false:
7749 // CHECK4-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
7750 // CHECK4-NEXT:    br label [[COND_END]]
7751 // CHECK4:       cond.end:
7752 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
7753 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
7754 // CHECK4-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
7755 // CHECK4-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
7756 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
7757 // CHECK4:       omp.inner.for.cond:
7758 // CHECK4-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
7759 // CHECK4-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !38
7760 // CHECK4-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
7761 // CHECK4-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7762 // CHECK4:       omp.inner.for.body:
7763 // CHECK4-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !38
7764 // CHECK4-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !38
7765 // CHECK4-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !38
7766 // CHECK4-NEXT:    store i32 [[TMP21]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !38
7767 // CHECK4-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !38
7768 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**, i32)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]], i32 [[TMP22]]), !llvm.access.group !38
7769 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
7770 // CHECK4:       omp.inner.for.inc:
7771 // CHECK4-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
7772 // CHECK4-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !38
7773 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
7774 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
7775 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP39:![0-9]+]]
7776 // CHECK4:       omp.inner.for.end:
7777 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
7778 // CHECK4:       omp.loop.exit:
7779 // CHECK4-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
7780 // CHECK4-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
7781 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
7782 // CHECK4-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
7783 // CHECK4-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
7784 // CHECK4-NEXT:    br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7785 // CHECK4:       .omp.final.then:
7786 // CHECK4-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7787 // CHECK4-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP29]], 0
7788 // CHECK4-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
7789 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
7790 // CHECK4-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
7791 // CHECK4-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
7792 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
7793 // CHECK4:       .omp.final.done:
7794 // CHECK4-NEXT:    br label [[OMP_PRECOND_END]]
7795 // CHECK4:       omp.precond.end:
7796 // CHECK4-NEXT:    ret void
7797 //
7798 //
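// .omp_outlined..15 is the parallel worksharing body for the l266 region. It receives
// the snapshotted ch as its trailing i32 parameter (stored into .capture_expr..addr)
// in addition to the distribute bounds, and recomputes the trip count from n before
// setting up its own loop bounds.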
7799 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..15
7800 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
7801 // CHECK4-NEXT:  entry:
7802 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
7803 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
7804 // CHECK4-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
7805 // CHECK4-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
7806 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
7807 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
7808 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
7809 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
7810 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
7811 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
7812 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
7813 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
7814 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
7815 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
7816 // CHECK4-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
7817 // CHECK4-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
7818 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7819 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7820 // CHECK4-NEXT:    [[I4:%.*]] = alloca i32, align 4
7821 // CHECK4-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_4:%.*]], align 4
7822 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
7823 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
7824 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
7825 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
7826 // CHECK4-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
7827 // CHECK4-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
7828 // CHECK4-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
7829 // CHECK4-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
7830 // CHECK4-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
7831 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
7832 // CHECK4-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
7833 // CHECK4-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
7834 // CHECK4-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
7835 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
7836 // CHECK4-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
7837 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7838 // CHECK4-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
7839 // CHECK4-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
7840 // CHECK4-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
7841 // CHECK4-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
7842 // CHECK4-NEXT:    store i32 0, i32* [[I]], align 4
7843 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7844 // CHECK4-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
7845 // CHECK4-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
7846 // CHECK4:       omp.precond.then:
7847 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
7848 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
7849 // CHECK4-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
7850 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
7851 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
7852 // CHECK4-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
7853 // CHECK4-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
7854 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
7855 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
7856 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
7857 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
7858 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
7859 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP12]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP10]])
7860 // CHECK4-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
7861 // CHECK4:       omp.dispatch.cond:
7862 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
7863 // CHECK4-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
7864 // CHECK4-NEXT:    [[CMP5:%.*]] = icmp ugt i32 [[TMP13]], [[TMP14]]
7865 // CHECK4-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
7866 // CHECK4:       cond.true:
7867 // CHECK4-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
7868 // CHECK4-NEXT:    br label [[COND_END:%.*]]
7869 // CHECK4:       cond.false:
7870 // CHECK4-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
7871 // CHECK4-NEXT:    br label [[COND_END]]
7872 // CHECK4:       cond.end:
7873 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP15]], [[COND_TRUE]] ], [ [[TMP16]], [[COND_FALSE]] ]
7874 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
7875 // CHECK4-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
7876 // CHECK4-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
7877 // CHECK4-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
7878 // CHECK4-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
7879 // CHECK4-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
7880 // CHECK4-NEXT:    br i1 [[CMP6]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
7881 // CHECK4:       omp.dispatch.body:
7882 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
7883 // CHECK4:       omp.inner.for.cond:
7884 // CHECK4-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
7885 // CHECK4-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !41
7886 // CHECK4-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
7887 // CHECK4-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7888 // CHECK4:       omp.inner.for.body:
7889 // CHECK4-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
7890 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP22]], 1
7891 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
7892 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !41
7893 // CHECK4-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !41
7894 // CHECK4-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !41
7895 // CHECK4-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
7896 // CHECK4-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !41
7897 // CHECK4-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !41
7898 // CHECK4-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !41
7899 // CHECK4-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
7900 // CHECK4-NEXT:    [[TMP28:%.*]] = load double, double* [[ARRAYIDX8]], align 4, !llvm.access.group !41
7901 // CHECK4-NEXT:    [[ADD9:%.*]] = fadd double [[TMP25]], [[TMP28]]
7902 // CHECK4-NEXT:    [[TMP29:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !41
7903 // CHECK4-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !41
7904 // CHECK4-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds double, double* [[TMP29]], i32 [[TMP30]]
7905 // CHECK4-NEXT:    store double [[ADD9]], double* [[ARRAYIDX10]], align 4, !llvm.access.group !41
7906 // CHECK4-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_4]], %class.anon.4* [[REF_TMP]], i32 0, i32 0
7907 // CHECK4-NEXT:    store double** [[TMP1]], double*** [[TMP31]], align 4, !llvm.access.group !41
7908 // CHECK4-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[CLASS_ANON_4]], %class.anon.4* [[REF_TMP]], i32 0, i32 1
7909 // CHECK4-NEXT:    store i32* [[I4]], i32** [[TMP32]], align 4, !llvm.access.group !41
7910 // CHECK4-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [[CLASS_ANON_4]], %class.anon.4* [[REF_TMP]], i32 0, i32 2
7911 // CHECK4-NEXT:    store double** [[TMP2]], double*** [[TMP33]], align 4, !llvm.access.group !41
7912 // CHECK4-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [[CLASS_ANON_4]], %class.anon.4* [[REF_TMP]], i32 0, i32 3
7913 // CHECK4-NEXT:    store double** [[TMP3]], double*** [[TMP34]], align 4, !llvm.access.group !41
7914 // CHECK4-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE3_clEv"(%class.anon.4* nonnull align 4 dereferenceable(16) [[REF_TMP]]), !llvm.access.group !41
7915 // CHECK4-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
7916 // CHECK4:       omp.body.continue:
7917 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
7918 // CHECK4:       omp.inner.for.inc:
7919 // CHECK4-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
7920 // CHECK4-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP35]], 1
7921 // CHECK4-NEXT:    store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
7922 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP42:![0-9]+]]
7923 // CHECK4:       omp.inner.for.end:
7924 // CHECK4-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
7925 // CHECK4:       omp.dispatch.inc:
7926 // CHECK4-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
7927 // CHECK4-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
7928 // CHECK4-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP36]], [[TMP37]]
7929 // CHECK4-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_LB]], align 4
7930 // CHECK4-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
7931 // CHECK4-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
7932 // CHECK4-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP38]], [[TMP39]]
7933 // CHECK4-NEXT:    store i32 [[ADD13]], i32* [[DOTOMP_UB]], align 4
7934 // CHECK4-NEXT:    br label [[OMP_DISPATCH_COND]]
7935 // CHECK4:       omp.dispatch.end:
7936 // CHECK4-NEXT:    [[TMP40:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
7937 // CHECK4-NEXT:    [[TMP41:%.*]] = load i32, i32* [[TMP40]], align 4
7938 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP41]])
7939 // CHECK4-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
7940 // CHECK4-NEXT:    [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0
7941 // CHECK4-NEXT:    br i1 [[TMP43]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7942 // CHECK4:       .omp.final.then:
7943 // CHECK4-NEXT:    [[TMP44:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7944 // CHECK4-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP44]], 0
7945 // CHECK4-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
7946 // CHECK4-NEXT:    [[MUL16:%.*]] = mul nsw i32 [[DIV15]], 1
7947 // CHECK4-NEXT:    [[ADD17:%.*]] = add nsw i32 0, [[MUL16]]
7948 // CHECK4-NEXT:    store i32 [[ADD17]], i32* [[I4]], align 4
7949 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
7950 // CHECK4:       .omp.final.done:
7951 // CHECK4-NEXT:    br label [[OMP_PRECOND_END]]
7952 // CHECK4:       omp.precond.end:
7953 // CHECK4-NEXT:    ret void
7954 //
7955 //
7956 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l299
7957 // CHECK4-SAME: (i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2]] {
7958 // CHECK4-NEXT:  entry:
7959 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
7960 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
7961 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
7962 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
7963 // CHECK4-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
7964 // CHECK4-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
7965 // CHECK4-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
7966 // CHECK4-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
7967 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
7968 // CHECK4-NEXT:    ret void
7969 //
7970 //
7971 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..18
7972 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
7973 // CHECK4-NEXT:  entry:
7974 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
7975 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
7976 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
7977 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
7978 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
7979 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
7980 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
7981 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
7982 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
7983 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
7984 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
7985 // CHECK4-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
7986 // CHECK4-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
7987 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7988 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7989 // CHECK4-NEXT:    [[I3:%.*]] = alloca i32, align 4
7990 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
7991 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
7992 // CHECK4-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
7993 // CHECK4-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
7994 // CHECK4-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
7995 // CHECK4-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
7996 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
7997 // CHECK4-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
7998 // CHECK4-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
7999 // CHECK4-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
8000 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
8001 // CHECK4-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
8002 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8003 // CHECK4-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
8004 // CHECK4-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
8005 // CHECK4-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
8006 // CHECK4-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
8007 // CHECK4-NEXT:    store i32 0, i32* [[I]], align 4
8008 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8009 // CHECK4-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
8010 // CHECK4-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
8011 // CHECK4:       omp.precond.then:
8012 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
8013 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8014 // CHECK4-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
8015 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
8016 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
8017 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
8018 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
8019 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
8020 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
8021 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8022 // CHECK4-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
8023 // CHECK4-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
8024 // CHECK4:       cond.true:
8025 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8026 // CHECK4-NEXT:    br label [[COND_END:%.*]]
8027 // CHECK4:       cond.false:
8028 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
8029 // CHECK4-NEXT:    br label [[COND_END]]
8030 // CHECK4:       cond.end:
8031 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
8032 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
8033 // CHECK4-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
8034 // CHECK4-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
8035 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
8036 // CHECK4:       omp.inner.for.cond:
8037 // CHECK4-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
8038 // CHECK4-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !44
8039 // CHECK4-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
8040 // CHECK4-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8041 // CHECK4:       omp.inner.for.body:
8042 // CHECK4-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !44
8043 // CHECK4-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !44
8044 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !44
8045 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
8046 // CHECK4:       omp.inner.for.inc:
8047 // CHECK4-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
8048 // CHECK4-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !44
8049 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
8050 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
8051 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP45:![0-9]+]]
8052 // CHECK4:       omp.inner.for.end:
8053 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
8054 // CHECK4:       omp.loop.exit:
8055 // CHECK4-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
8056 // CHECK4-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
8057 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
8058 // CHECK4-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
8059 // CHECK4-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
8060 // CHECK4-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
8061 // CHECK4:       .omp.final.then:
8062 // CHECK4-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8063 // CHECK4-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
8064 // CHECK4-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
8065 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
8066 // CHECK4-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
8067 // CHECK4-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
8068 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
8069 // CHECK4:       .omp.final.done:
8070 // CHECK4-NEXT:    br label [[OMP_PRECOND_END]]
8071 // CHECK4:       omp.precond.end:
8072 // CHECK4-NEXT:    ret void
8073 //
8074 //
8075 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..19
8076 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
8077 // CHECK4-NEXT:  entry:
8078 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
8079 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
8080 // CHECK4-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
8081 // CHECK4-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
8082 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
8083 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
8084 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
8085 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
8086 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
8087 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
8088 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
8089 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
8090 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
8091 // CHECK4-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
8092 // CHECK4-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
8093 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
8094 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
8095 // CHECK4-NEXT:    [[I3:%.*]] = alloca i32, align 4
8096 // CHECK4-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_5:%.*]], align 4
8097 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
8098 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
8099 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
8100 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
8101 // CHECK4-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
8102 // CHECK4-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
8103 // CHECK4-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
8104 // CHECK4-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
8105 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
8106 // CHECK4-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
8107 // CHECK4-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
8108 // CHECK4-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
8109 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
8110 // CHECK4-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
8111 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8112 // CHECK4-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
8113 // CHECK4-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
8114 // CHECK4-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
8115 // CHECK4-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
8116 // CHECK4-NEXT:    store i32 0, i32* [[I]], align 4
8117 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8118 // CHECK4-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
8119 // CHECK4-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
8120 // CHECK4:       omp.precond.then:
8121 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
8122 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8123 // CHECK4-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
8124 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
8125 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
8126 // CHECK4-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
8127 // CHECK4-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
8128 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
8129 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
8130 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
8131 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
8132 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
8133 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
8134 // CHECK4-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32 35, i32 [[TMP10]], i32 [[TMP11]], i32 1, i32 1)
8135 // CHECK4-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
8136 // CHECK4:       omp.dispatch.cond:
8137 // CHECK4-NEXT:    [[TMP14:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
8138 // CHECK4-NEXT:    [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4
8139 // CHECK4-NEXT:    [[TMP16:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP15]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
8140 // CHECK4-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP16]], 0
8141 // CHECK4-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
8142 // CHECK4:       omp.dispatch.body:
8143 // CHECK4-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
8144 // CHECK4-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
8145 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
8146 // CHECK4:       omp.inner.for.cond:
8147 // CHECK4-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
8148 // CHECK4-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !47
8149 // CHECK4-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
8150 // CHECK4-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8151 // CHECK4:       omp.inner.for.body:
8152 // CHECK4-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
8153 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP20]], 1
8154 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
8155 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !47
8156 // CHECK4-NEXT:    [[TMP21:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !47
8157 // CHECK4-NEXT:    [[TMP22:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !47
8158 // CHECK4-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP21]], i32 [[TMP22]]
8159 // CHECK4-NEXT:    [[TMP23:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !47
8160 // CHECK4-NEXT:    [[TMP24:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !47
8161 // CHECK4-NEXT:    [[TMP25:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !47
8162 // CHECK4-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds double, double* [[TMP24]], i32 [[TMP25]]
8163 // CHECK4-NEXT:    [[TMP26:%.*]] = load double, double* [[ARRAYIDX5]], align 4, !llvm.access.group !47
8164 // CHECK4-NEXT:    [[ADD6:%.*]] = fadd double [[TMP23]], [[TMP26]]
8165 // CHECK4-NEXT:    [[TMP27:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !47
8166 // CHECK4-NEXT:    [[TMP28:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !47
8167 // CHECK4-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds double, double* [[TMP27]], i32 [[TMP28]]
8168 // CHECK4-NEXT:    store double [[ADD6]], double* [[ARRAYIDX7]], align 4, !llvm.access.group !47
8169 // CHECK4-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_5]], %class.anon.5* [[REF_TMP]], i32 0, i32 0
8170 // CHECK4-NEXT:    store double** [[TMP1]], double*** [[TMP29]], align 4, !llvm.access.group !47
8171 // CHECK4-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_5]], %class.anon.5* [[REF_TMP]], i32 0, i32 1
8172 // CHECK4-NEXT:    store i32* [[I3]], i32** [[TMP30]], align 4, !llvm.access.group !47
8173 // CHECK4-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_5]], %class.anon.5* [[REF_TMP]], i32 0, i32 2
8174 // CHECK4-NEXT:    store double** [[TMP2]], double*** [[TMP31]], align 4, !llvm.access.group !47
8175 // CHECK4-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[CLASS_ANON_5]], %class.anon.5* [[REF_TMP]], i32 0, i32 3
8176 // CHECK4-NEXT:    store double** [[TMP3]], double*** [[TMP32]], align 4, !llvm.access.group !47
8177 // CHECK4-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE4_clEv"(%class.anon.5* nonnull align 4 dereferenceable(16) [[REF_TMP]]), !llvm.access.group !47
8178 // CHECK4-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
8179 // CHECK4:       omp.body.continue:
8180 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
8181 // CHECK4:       omp.inner.for.inc:
8182 // CHECK4-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
8183 // CHECK4-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP33]], 1
8184 // CHECK4-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
8185 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP48:![0-9]+]]
8186 // CHECK4:       omp.inner.for.end:
8187 // CHECK4-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
8188 // CHECK4:       omp.dispatch.inc:
8189 // CHECK4-NEXT:    br label [[OMP_DISPATCH_COND]]
8190 // CHECK4:       omp.dispatch.end:
8191 // CHECK4-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
8192 // CHECK4-NEXT:    [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
8193 // CHECK4-NEXT:    br i1 [[TMP35]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
8194 // CHECK4:       .omp.final.then:
8195 // CHECK4-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8196 // CHECK4-NEXT:    [[SUB9:%.*]] = sub nsw i32 [[TMP36]], 0
8197 // CHECK4-NEXT:    [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
8198 // CHECK4-NEXT:    [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
8199 // CHECK4-NEXT:    [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
8200 // CHECK4-NEXT:    store i32 [[ADD12]], i32* [[I3]], align 4
8201 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
8202 // CHECK4:       .omp.final.done:
8203 // CHECK4-NEXT:    br label [[OMP_PRECOND_END]]
8204 // CHECK4:       omp.precond.end:
8205 // CHECK4-NEXT:    ret void
8206 //
8207 //
8208 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l328
8209 // CHECK4-SAME: (i32 [[CH:%.*]], i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR2]] {
8210 // CHECK4-NEXT:  entry:
8211 // CHECK4-NEXT:    [[CH_ADDR:%.*]] = alloca i32, align 4
8212 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
8213 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
8214 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
8215 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
8216 // CHECK4-NEXT:    store i32 [[CH]], i32* [[CH_ADDR]], align 4
8217 // CHECK4-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
8218 // CHECK4-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
8219 // CHECK4-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
8220 // CHECK4-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
8221 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
8222 // CHECK4-NEXT:    ret void
8223 //
8224 //
8225 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..22
8226 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
8227 // CHECK4-NEXT:  entry:
8228 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
8229 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
8230 // CHECK4-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 4
8231 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
8232 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
8233 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
8234 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
8235 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
8236 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
8237 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
8238 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
8239 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
8240 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
8241 // CHECK4-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
8242 // CHECK4-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
8243 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
8244 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
8245 // CHECK4-NEXT:    [[I4:%.*]] = alloca i32, align 4
8246 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
8247 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
8248 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
8249 // CHECK4-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 4
8250 // CHECK4-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
8251 // CHECK4-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
8252 // CHECK4-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
8253 // CHECK4-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
8254 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
8255 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
8256 // CHECK4-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 4
8257 // CHECK4-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 4
8258 // CHECK4-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 4
8259 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
8260 // CHECK4-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
8261 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
8262 // CHECK4-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
8263 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8264 // CHECK4-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
8265 // CHECK4-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
8266 // CHECK4-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
8267 // CHECK4-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
8268 // CHECK4-NEXT:    store i32 0, i32* [[I]], align 4
8269 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8270 // CHECK4-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
8271 // CHECK4-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
8272 // CHECK4:       omp.precond.then:
8273 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
8274 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
8275 // CHECK4-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
8276 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
8277 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
8278 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
8279 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
8280 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
8281 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
8282 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
8283 // CHECK4-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
8284 // CHECK4-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
8285 // CHECK4:       cond.true:
8286 // CHECK4-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
8287 // CHECK4-NEXT:    br label [[COND_END:%.*]]
8288 // CHECK4:       cond.false:
8289 // CHECK4-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
8290 // CHECK4-NEXT:    br label [[COND_END]]
8291 // CHECK4:       cond.end:
8292 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
8293 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
8294 // CHECK4-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
8295 // CHECK4-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
8296 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
8297 // CHECK4:       omp.inner.for.cond:
8298 // CHECK4-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
8299 // CHECK4-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !50
8300 // CHECK4-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
8301 // CHECK4-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8302 // CHECK4:       omp.inner.for.body:
8303 // CHECK4-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !50
8304 // CHECK4-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !50
8305 // CHECK4-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !50
8306 // CHECK4-NEXT:    store i32 [[TMP21]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !50
8307 // CHECK4-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !50
8308 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**, i32)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]], i32 [[TMP22]]), !llvm.access.group !50
8309 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
8310 // CHECK4:       omp.inner.for.inc:
8311 // CHECK4-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
8312 // CHECK4-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !50
8313 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
8314 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
8315 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP51:![0-9]+]]
8316 // CHECK4:       omp.inner.for.end:
8317 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
8318 // CHECK4:       omp.loop.exit:
8319 // CHECK4-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
8320 // CHECK4-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
8321 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
8322 // CHECK4-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
8323 // CHECK4-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
8324 // CHECK4-NEXT:    br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
8325 // CHECK4:       .omp.final.then:
8326 // CHECK4-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8327 // CHECK4-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP29]], 0
8328 // CHECK4-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
8329 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
8330 // CHECK4-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
8331 // CHECK4-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
8332 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
8333 // CHECK4:       .omp.final.done:
8334 // CHECK4-NEXT:    br label [[OMP_PRECOND_END]]
8335 // CHECK4:       omp.precond.end:
8336 // CHECK4-NEXT:    ret void
8337 //
8338 //
8339 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..23
8340 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
8341 // CHECK4-NEXT:  entry:
8342 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
8343 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
8344 // CHECK4-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
8345 // CHECK4-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
8346 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
8347 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
8348 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
8349 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
8350 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
8351 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
8352 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
8353 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
8354 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
8355 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
8356 // CHECK4-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
8357 // CHECK4-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
8358 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
8359 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
8360 // CHECK4-NEXT:    [[I4:%.*]] = alloca i32, align 4
8361 // CHECK4-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_6:%.*]], align 4
8362 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
8363 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
8364 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
8365 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
8366 // CHECK4-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
8367 // CHECK4-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
8368 // CHECK4-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
8369 // CHECK4-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
8370 // CHECK4-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
8371 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
8372 // CHECK4-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
8373 // CHECK4-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
8374 // CHECK4-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
8375 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
8376 // CHECK4-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
8377 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8378 // CHECK4-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
8379 // CHECK4-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
8380 // CHECK4-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
8381 // CHECK4-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
8382 // CHECK4-NEXT:    store i32 0, i32* [[I]], align 4
8383 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8384 // CHECK4-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
8385 // CHECK4-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
8386 // CHECK4:       omp.precond.then:
8387 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
8388 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
8389 // CHECK4-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
8390 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
8391 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
8392 // CHECK4-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
8393 // CHECK4-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
8394 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
8395 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
8396 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
8397 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
8398 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
8399 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
8400 // CHECK4-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
8401 // CHECK4-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32 35, i32 [[TMP11]], i32 [[TMP12]], i32 1, i32 [[TMP10]])
8402 // CHECK4-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
8403 // CHECK4:       omp.dispatch.cond:
8404 // CHECK4-NEXT:    [[TMP15:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
8405 // CHECK4-NEXT:    [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4
8406 // CHECK4-NEXT:    [[TMP17:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP16]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
8407 // CHECK4-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP17]], 0
8408 // CHECK4-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
8409 // CHECK4:       omp.dispatch.body:
8410 // CHECK4-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
8411 // CHECK4-NEXT:    store i32 [[TMP18]], i32* [[DOTOMP_IV]], align 4
8412 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
8413 // CHECK4:       omp.inner.for.cond:
8414 // CHECK4-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
8415 // CHECK4-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !53
8416 // CHECK4-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
8417 // CHECK4-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8418 // CHECK4:       omp.inner.for.body:
8419 // CHECK4-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
8420 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP21]], 1
8421 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
8422 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !53
8423 // CHECK4-NEXT:    [[TMP22:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !53
8424 // CHECK4-NEXT:    [[TMP23:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !53
8425 // CHECK4-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP22]], i32 [[TMP23]]
8426 // CHECK4-NEXT:    [[TMP24:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !53
8427 // CHECK4-NEXT:    [[TMP25:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !53
8428 // CHECK4-NEXT:    [[TMP26:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !53
8429 // CHECK4-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP25]], i32 [[TMP26]]
8430 // CHECK4-NEXT:    [[TMP27:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !53
8431 // CHECK4-NEXT:    [[ADD7:%.*]] = fadd double [[TMP24]], [[TMP27]]
8432 // CHECK4-NEXT:    [[TMP28:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !53
8433 // CHECK4-NEXT:    [[TMP29:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !53
8434 // CHECK4-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP28]], i32 [[TMP29]]
8435 // CHECK4-NEXT:    store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !53
8436 // CHECK4-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_6]], %class.anon.6* [[REF_TMP]], i32 0, i32 0
8437 // CHECK4-NEXT:    store double** [[TMP1]], double*** [[TMP30]], align 4, !llvm.access.group !53
8438 // CHECK4-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_6]], %class.anon.6* [[REF_TMP]], i32 0, i32 1
8439 // CHECK4-NEXT:    store i32* [[I4]], i32** [[TMP31]], align 4, !llvm.access.group !53
8440 // CHECK4-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[CLASS_ANON_6]], %class.anon.6* [[REF_TMP]], i32 0, i32 2
8441 // CHECK4-NEXT:    store double** [[TMP2]], double*** [[TMP32]], align 4, !llvm.access.group !53
8442 // CHECK4-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [[CLASS_ANON_6]], %class.anon.6* [[REF_TMP]], i32 0, i32 3
8443 // CHECK4-NEXT:    store double** [[TMP3]], double*** [[TMP33]], align 4, !llvm.access.group !53
8444 // CHECK4-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE5_clEv"(%class.anon.6* nonnull align 4 dereferenceable(16) [[REF_TMP]]), !llvm.access.group !53
8445 // CHECK4-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
8446 // CHECK4:       omp.body.continue:
8447 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
8448 // CHECK4:       omp.inner.for.inc:
8449 // CHECK4-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
8450 // CHECK4-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP34]], 1
8451 // CHECK4-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
8452 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP54:![0-9]+]]
8453 // CHECK4:       omp.inner.for.end:
8454 // CHECK4-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
8455 // CHECK4:       omp.dispatch.inc:
8456 // CHECK4-NEXT:    br label [[OMP_DISPATCH_COND]]
8457 // CHECK4:       omp.dispatch.end:
8458 // CHECK4-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
8459 // CHECK4-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
8460 // CHECK4-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
8461 // CHECK4:       .omp.final.then:
8462 // CHECK4-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8463 // CHECK4-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP37]], 0
8464 // CHECK4-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
8465 // CHECK4-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
8466 // CHECK4-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
8467 // CHECK4-NEXT:    store i32 [[ADD13]], i32* [[I4]], align 4
8468 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
8469 // CHECK4:       .omp.final.done:
8470 // CHECK4-NEXT:    br label [[OMP_PRECOND_END]]
8471 // CHECK4:       omp.precond.end:
8472 // CHECK4-NEXT:    ret void
8473 //
8474 //
8475 // CHECK4-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
8476 // CHECK4-SAME: () #[[ATTR4:[0-9]+]] {
8477 // CHECK4-NEXT:  entry:
8478 // CHECK4-NEXT:    call void @__tgt_register_requires(i64 1)
8479 // CHECK4-NEXT:    ret void
8480 //
8481 //
8482 // CHECK5-LABEL: define {{[^@]+}}@main
8483 // CHECK5-SAME: () #[[ATTR0:[0-9]+]] {
8484 // CHECK5-NEXT:  entry:
8485 // CHECK5-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
8486 // CHECK5-NEXT:    [[A:%.*]] = alloca double*, align 8
8487 // CHECK5-NEXT:    [[B:%.*]] = alloca double*, align 8
8488 // CHECK5-NEXT:    [[C:%.*]] = alloca double*, align 8
8489 // CHECK5-NEXT:    [[N:%.*]] = alloca i32, align 4
8490 // CHECK5-NEXT:    [[CH:%.*]] = alloca i32, align 4
8491 // CHECK5-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 8
8492 // CHECK5-NEXT:    store i32 0, i32* [[RETVAL]], align 4
8493 // CHECK5-NEXT:    store i32 10000, i32* [[N]], align 4
8494 // CHECK5-NEXT:    store i32 100, i32* [[CH]], align 4
8495 // CHECK5-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 0
8496 // CHECK5-NEXT:    store i32* [[N]], i32** [[TMP0]], align 8
8497 // CHECK5-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 1
8498 // CHECK5-NEXT:    store double** [[A]], double*** [[TMP1]], align 8
8499 // CHECK5-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 2
8500 // CHECK5-NEXT:    store double** [[B]], double*** [[TMP2]], align 8
8501 // CHECK5-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 3
8502 // CHECK5-NEXT:    store double** [[C]], double*** [[TMP3]], align 8
8503 // CHECK5-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 4
8504 // CHECK5-NEXT:    store i32* [[CH]], i32** [[TMP4]], align 8
8505 // CHECK5-NEXT:    call void @"_ZZ4mainENK3$_0clEv"(%class.anon* nonnull align 8 dereferenceable(40) [[REF_TMP]])
8506 // CHECK5-NEXT:    ret i32 0
8507 //
8508 //
8509 // CHECK6-LABEL: define {{[^@]+}}@main
8510 // CHECK6-SAME: () #[[ATTR0:[0-9]+]] {
8511 // CHECK6-NEXT:  entry:
8512 // CHECK6-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
8513 // CHECK6-NEXT:    [[A:%.*]] = alloca double*, align 8
8514 // CHECK6-NEXT:    [[B:%.*]] = alloca double*, align 8
8515 // CHECK6-NEXT:    [[C:%.*]] = alloca double*, align 8
8516 // CHECK6-NEXT:    [[N:%.*]] = alloca i32, align 4
8517 // CHECK6-NEXT:    [[CH:%.*]] = alloca i32, align 4
8518 // CHECK6-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 8
8519 // CHECK6-NEXT:    store i32 0, i32* [[RETVAL]], align 4
8520 // CHECK6-NEXT:    store i32 10000, i32* [[N]], align 4
8521 // CHECK6-NEXT:    store i32 100, i32* [[CH]], align 4
8522 // CHECK6-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 0
8523 // CHECK6-NEXT:    store i32* [[N]], i32** [[TMP0]], align 8
8524 // CHECK6-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 1
8525 // CHECK6-NEXT:    store double** [[A]], double*** [[TMP1]], align 8
8526 // CHECK6-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 2
8527 // CHECK6-NEXT:    store double** [[B]], double*** [[TMP2]], align 8
8528 // CHECK6-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 3
8529 // CHECK6-NEXT:    store double** [[C]], double*** [[TMP3]], align 8
8530 // CHECK6-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 4
8531 // CHECK6-NEXT:    store i32* [[CH]], i32** [[TMP4]], align 8
8532 // CHECK6-NEXT:    call void @"_ZZ4mainENK3$_0clEv"(%class.anon* nonnull align 8 dereferenceable(40) [[REF_TMP]])
8533 // CHECK6-NEXT:    ret i32 0
8534 //
8535 //
8536 // CHECK7-LABEL: define {{[^@]+}}@main
8537 // CHECK7-SAME: () #[[ATTR0:[0-9]+]] {
8538 // CHECK7-NEXT:  entry:
8539 // CHECK7-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
8540 // CHECK7-NEXT:    [[A:%.*]] = alloca double*, align 4
8541 // CHECK7-NEXT:    [[B:%.*]] = alloca double*, align 4
8542 // CHECK7-NEXT:    [[C:%.*]] = alloca double*, align 4
8543 // CHECK7-NEXT:    [[N:%.*]] = alloca i32, align 4
8544 // CHECK7-NEXT:    [[CH:%.*]] = alloca i32, align 4
8545 // CHECK7-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 4
8546 // CHECK7-NEXT:    store i32 0, i32* [[RETVAL]], align 4
8547 // CHECK7-NEXT:    store i32 10000, i32* [[N]], align 4
8548 // CHECK7-NEXT:    store i32 100, i32* [[CH]], align 4
8549 // CHECK7-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 0
8550 // CHECK7-NEXT:    store i32* [[N]], i32** [[TMP0]], align 4
8551 // CHECK7-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 1
8552 // CHECK7-NEXT:    store double** [[A]], double*** [[TMP1]], align 4
8553 // CHECK7-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 2
8554 // CHECK7-NEXT:    store double** [[B]], double*** [[TMP2]], align 4
8555 // CHECK7-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 3
8556 // CHECK7-NEXT:    store double** [[C]], double*** [[TMP3]], align 4
8557 // CHECK7-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 4
8558 // CHECK7-NEXT:    store i32* [[CH]], i32** [[TMP4]], align 4
8559 // CHECK7-NEXT:    call void @"_ZZ4mainENK3$_0clEv"(%class.anon* nonnull align 4 dereferenceable(20) [[REF_TMP]])
8560 // CHECK7-NEXT:    ret i32 0
8561 //
8562 //
8563 // CHECK8-LABEL: define {{[^@]+}}@main
8564 // CHECK8-SAME: () #[[ATTR0:[0-9]+]] {
8565 // CHECK8-NEXT:  entry:
8566 // CHECK8-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
8567 // CHECK8-NEXT:    [[A:%.*]] = alloca double*, align 4
8568 // CHECK8-NEXT:    [[B:%.*]] = alloca double*, align 4
8569 // CHECK8-NEXT:    [[C:%.*]] = alloca double*, align 4
8570 // CHECK8-NEXT:    [[N:%.*]] = alloca i32, align 4
8571 // CHECK8-NEXT:    [[CH:%.*]] = alloca i32, align 4
8572 // CHECK8-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 4
8573 // CHECK8-NEXT:    store i32 0, i32* [[RETVAL]], align 4
8574 // CHECK8-NEXT:    store i32 10000, i32* [[N]], align 4
8575 // CHECK8-NEXT:    store i32 100, i32* [[CH]], align 4
8576 // CHECK8-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 0
8577 // CHECK8-NEXT:    store i32* [[N]], i32** [[TMP0]], align 4
8578 // CHECK8-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 1
8579 // CHECK8-NEXT:    store double** [[A]], double*** [[TMP1]], align 4
8580 // CHECK8-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 2
8581 // CHECK8-NEXT:    store double** [[B]], double*** [[TMP2]], align 4
8582 // CHECK8-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 3
8583 // CHECK8-NEXT:    store double** [[C]], double*** [[TMP3]], align 4
8584 // CHECK8-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 4
8585 // CHECK8-NEXT:    store i32* [[CH]], i32** [[TMP4]], align 4
8586 // CHECK8-NEXT:    call void @"_ZZ4mainENK3$_0clEv"(%class.anon* nonnull align 4 dereferenceable(20) [[REF_TMP]])
8587 // CHECK8-NEXT:    ret i32 0
8588 //
8589 //
8590 // CHECK9-LABEL: define {{[^@]+}}@main
8591 // CHECK9-SAME: () #[[ATTR0:[0-9]+]] {
8592 // CHECK9-NEXT:  entry:
8593 // CHECK9-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
8594 // CHECK9-NEXT:    [[A:%.*]] = alloca double*, align 8
8595 // CHECK9-NEXT:    [[B:%.*]] = alloca double*, align 8
8596 // CHECK9-NEXT:    [[C:%.*]] = alloca double*, align 8
8597 // CHECK9-NEXT:    [[N:%.*]] = alloca i32, align 4
8598 // CHECK9-NEXT:    [[CH:%.*]] = alloca i32, align 4
8599 // CHECK9-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
8600 // CHECK9-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 8
8601 // CHECK9-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 8
8602 // CHECK9-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 8
8603 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
8604 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
8605 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
8606 // CHECK9-NEXT:    [[N_CASTED3:%.*]] = alloca i64, align 8
8607 // CHECK9-NEXT:    [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [4 x i8*], align 8
8608 // CHECK9-NEXT:    [[DOTOFFLOAD_PTRS6:%.*]] = alloca [4 x i8*], align 8
8609 // CHECK9-NEXT:    [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [4 x i8*], align 8
8610 // CHECK9-NEXT:    [[_TMP8:%.*]] = alloca i32, align 4
8611 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4
8612 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4
8613 // CHECK9-NEXT:    [[CH_CASTED:%.*]] = alloca i64, align 8
8614 // CHECK9-NEXT:    [[N_CASTED18:%.*]] = alloca i64, align 8
8615 // CHECK9-NEXT:    [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [5 x i8*], align 8
8616 // CHECK9-NEXT:    [[DOTOFFLOAD_PTRS21:%.*]] = alloca [5 x i8*], align 8
8617 // CHECK9-NEXT:    [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [5 x i8*], align 8
8618 // CHECK9-NEXT:    [[_TMP23:%.*]] = alloca i32, align 4
8619 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4
8620 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4
8621 // CHECK9-NEXT:    [[N_CASTED32:%.*]] = alloca i64, align 8
8622 // CHECK9-NEXT:    [[DOTOFFLOAD_BASEPTRS34:%.*]] = alloca [4 x i8*], align 8
8623 // CHECK9-NEXT:    [[DOTOFFLOAD_PTRS35:%.*]] = alloca [4 x i8*], align 8
8624 // CHECK9-NEXT:    [[DOTOFFLOAD_MAPPERS36:%.*]] = alloca [4 x i8*], align 8
8625 // CHECK9-NEXT:    [[_TMP37:%.*]] = alloca i32, align 4
8626 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_38:%.*]] = alloca i32, align 4
8627 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4
8628 // CHECK9-NEXT:    [[CH_CASTED46:%.*]] = alloca i64, align 8
8629 // CHECK9-NEXT:    [[N_CASTED48:%.*]] = alloca i64, align 8
8630 // CHECK9-NEXT:    [[DOTOFFLOAD_BASEPTRS50:%.*]] = alloca [5 x i8*], align 8
8631 // CHECK9-NEXT:    [[DOTOFFLOAD_PTRS51:%.*]] = alloca [5 x i8*], align 8
8632 // CHECK9-NEXT:    [[DOTOFFLOAD_MAPPERS52:%.*]] = alloca [5 x i8*], align 8
8633 // CHECK9-NEXT:    [[_TMP53:%.*]] = alloca i32, align 4
8634 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_54:%.*]] = alloca i32, align 4
8635 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_55:%.*]] = alloca i32, align 4
8636 // CHECK9-NEXT:    [[N_CASTED62:%.*]] = alloca i64, align 8
8637 // CHECK9-NEXT:    [[DOTOFFLOAD_BASEPTRS64:%.*]] = alloca [4 x i8*], align 8
8638 // CHECK9-NEXT:    [[DOTOFFLOAD_PTRS65:%.*]] = alloca [4 x i8*], align 8
8639 // CHECK9-NEXT:    [[DOTOFFLOAD_MAPPERS66:%.*]] = alloca [4 x i8*], align 8
8640 // CHECK9-NEXT:    [[_TMP67:%.*]] = alloca i32, align 4
8641 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_68:%.*]] = alloca i32, align 4
8642 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_69:%.*]] = alloca i32, align 4
8643 // CHECK9-NEXT:    [[CH_CASTED76:%.*]] = alloca i64, align 8
8644 // CHECK9-NEXT:    [[N_CASTED78:%.*]] = alloca i64, align 8
8645 // CHECK9-NEXT:    [[DOTOFFLOAD_BASEPTRS80:%.*]] = alloca [5 x i8*], align 8
8646 // CHECK9-NEXT:    [[DOTOFFLOAD_PTRS81:%.*]] = alloca [5 x i8*], align 8
8647 // CHECK9-NEXT:    [[DOTOFFLOAD_MAPPERS82:%.*]] = alloca [5 x i8*], align 8
8648 // CHECK9-NEXT:    [[_TMP83:%.*]] = alloca i32, align 4
8649 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_84:%.*]] = alloca i32, align 4
8650 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_85:%.*]] = alloca i32, align 4
8651 // CHECK9-NEXT:    store i32 0, i32* [[RETVAL]], align 4
8652 // CHECK9-NEXT:    store i32 10000, i32* [[N]], align 4
8653 // CHECK9-NEXT:    store i32 100, i32* [[CH]], align 4
8654 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
8655 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_CASTED]] to i32*
8656 // CHECK9-NEXT:    store i32 [[TMP0]], i32* [[CONV]], align 4
8657 // CHECK9-NEXT:    [[TMP1:%.*]] = load i64, i64* [[N_CASTED]], align 8
8658 // CHECK9-NEXT:    [[TMP2:%.*]] = load double*, double** [[A]], align 8
8659 // CHECK9-NEXT:    [[TMP3:%.*]] = load double*, double** [[B]], align 8
8660 // CHECK9-NEXT:    [[TMP4:%.*]] = load double*, double** [[C]], align 8
8661 // CHECK9-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
8662 // CHECK9-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i64*
8663 // CHECK9-NEXT:    store i64 [[TMP1]], i64* [[TMP6]], align 8
8664 // CHECK9-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
8665 // CHECK9-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64*
8666 // CHECK9-NEXT:    store i64 [[TMP1]], i64* [[TMP8]], align 8
8667 // CHECK9-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
8668 // CHECK9-NEXT:    store i8* null, i8** [[TMP9]], align 8
8669 // CHECK9-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
8670 // CHECK9-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to double**
8671 // CHECK9-NEXT:    store double* [[TMP2]], double** [[TMP11]], align 8
8672 // CHECK9-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
8673 // CHECK9-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double**
8674 // CHECK9-NEXT:    store double* [[TMP2]], double** [[TMP13]], align 8
8675 // CHECK9-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
8676 // CHECK9-NEXT:    store i8* null, i8** [[TMP14]], align 8
8677 // CHECK9-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
8678 // CHECK9-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to double**
8679 // CHECK9-NEXT:    store double* [[TMP3]], double** [[TMP16]], align 8
8680 // CHECK9-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
8681 // CHECK9-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to double**
8682 // CHECK9-NEXT:    store double* [[TMP3]], double** [[TMP18]], align 8
8683 // CHECK9-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
8684 // CHECK9-NEXT:    store i8* null, i8** [[TMP19]], align 8
8685 // CHECK9-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
8686 // CHECK9-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to double**
8687 // CHECK9-NEXT:    store double* [[TMP4]], double** [[TMP21]], align 8
8688 // CHECK9-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
8689 // CHECK9-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to double**
8690 // CHECK9-NEXT:    store double* [[TMP4]], double** [[TMP23]], align 8
8691 // CHECK9-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
8692 // CHECK9-NEXT:    store i8* null, i8** [[TMP24]], align 8
8693 // CHECK9-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
8694 // CHECK9-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
8695 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[N]], align 4
8696 // CHECK9-NEXT:    store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4
8697 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8698 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0
8699 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
8700 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
8701 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
8702 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8703 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP29]], 1
8704 // CHECK9-NEXT:    [[TMP30:%.*]] = zext i32 [[ADD]] to i64
8705 // CHECK9-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]])
8706 // CHECK9-NEXT:    [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l368.region_id, i32 4, i8** [[TMP25]], i8** [[TMP26]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
8707 // CHECK9-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
8708 // CHECK9-NEXT:    br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
8709 // CHECK9:       omp_offload.failed:
8710 // CHECK9-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l368(i64 [[TMP1]], double* [[TMP2]], double* [[TMP3]], double* [[TMP4]]) #[[ATTR2:[0-9]+]]
8711 // CHECK9-NEXT:    br label [[OMP_OFFLOAD_CONT]]
8712 // CHECK9:       omp_offload.cont:
8713 // CHECK9-NEXT:    [[TMP33:%.*]] = load i32, i32* [[N]], align 4
8714 // CHECK9-NEXT:    [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32*
8715 // CHECK9-NEXT:    store i32 [[TMP33]], i32* [[CONV4]], align 4
8716 // CHECK9-NEXT:    [[TMP34:%.*]] = load i64, i64* [[N_CASTED3]], align 8
8717 // CHECK9-NEXT:    [[TMP35:%.*]] = load double*, double** [[A]], align 8
8718 // CHECK9-NEXT:    [[TMP36:%.*]] = load double*, double** [[B]], align 8
8719 // CHECK9-NEXT:    [[TMP37:%.*]] = load double*, double** [[C]], align 8
8720 // CHECK9-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
8721 // CHECK9-NEXT:    [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64*
8722 // CHECK9-NEXT:    store i64 [[TMP34]], i64* [[TMP39]], align 8
8723 // CHECK9-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
8724 // CHECK9-NEXT:    [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i64*
8725 // CHECK9-NEXT:    store i64 [[TMP34]], i64* [[TMP41]], align 8
8726 // CHECK9-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0
8727 // CHECK9-NEXT:    store i8* null, i8** [[TMP42]], align 8
8728 // CHECK9-NEXT:    [[TMP43:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1
8729 // CHECK9-NEXT:    [[TMP44:%.*]] = bitcast i8** [[TMP43]] to double**
8730 // CHECK9-NEXT:    store double* [[TMP35]], double** [[TMP44]], align 8
8731 // CHECK9-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1
8732 // CHECK9-NEXT:    [[TMP46:%.*]] = bitcast i8** [[TMP45]] to double**
8733 // CHECK9-NEXT:    store double* [[TMP35]], double** [[TMP46]], align 8
8734 // CHECK9-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1
8735 // CHECK9-NEXT:    store i8* null, i8** [[TMP47]], align 8
8736 // CHECK9-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2
8737 // CHECK9-NEXT:    [[TMP49:%.*]] = bitcast i8** [[TMP48]] to double**
8738 // CHECK9-NEXT:    store double* [[TMP36]], double** [[TMP49]], align 8
8739 // CHECK9-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2
8740 // CHECK9-NEXT:    [[TMP51:%.*]] = bitcast i8** [[TMP50]] to double**
8741 // CHECK9-NEXT:    store double* [[TMP36]], double** [[TMP51]], align 8
8742 // CHECK9-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2
8743 // CHECK9-NEXT:    store i8* null, i8** [[TMP52]], align 8
8744 // CHECK9-NEXT:    [[TMP53:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 3
8745 // CHECK9-NEXT:    [[TMP54:%.*]] = bitcast i8** [[TMP53]] to double**
8746 // CHECK9-NEXT:    store double* [[TMP37]], double** [[TMP54]], align 8
8747 // CHECK9-NEXT:    [[TMP55:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 3
8748 // CHECK9-NEXT:    [[TMP56:%.*]] = bitcast i8** [[TMP55]] to double**
8749 // CHECK9-NEXT:    store double* [[TMP37]], double** [[TMP56]], align 8
8750 // CHECK9-NEXT:    [[TMP57:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 3
8751 // CHECK9-NEXT:    store i8* null, i8** [[TMP57]], align 8
8752 // CHECK9-NEXT:    [[TMP58:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
8753 // CHECK9-NEXT:    [[TMP59:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
8754 // CHECK9-NEXT:    [[TMP60:%.*]] = load i32, i32* [[N]], align 4
8755 // CHECK9-NEXT:    store i32 [[TMP60]], i32* [[DOTCAPTURE_EXPR_9]], align 4
8756 // CHECK9-NEXT:    [[TMP61:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4
8757 // CHECK9-NEXT:    [[SUB11:%.*]] = sub nsw i32 [[TMP61]], 0
8758 // CHECK9-NEXT:    [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1
8759 // CHECK9-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1
8760 // CHECK9-NEXT:    store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4
8761 // CHECK9-NEXT:    [[TMP62:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4
8762 // CHECK9-NEXT:    [[ADD14:%.*]] = add nsw i32 [[TMP62]], 1
8763 // CHECK9-NEXT:    [[TMP63:%.*]] = zext i32 [[ADD14]] to i64
8764 // CHECK9-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP63]])
8765 // CHECK9-NEXT:    [[TMP64:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l407.region_id, i32 4, i8** [[TMP58]], i8** [[TMP59]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
8766 // CHECK9-NEXT:    [[TMP65:%.*]] = icmp ne i32 [[TMP64]], 0
8767 // CHECK9-NEXT:    br i1 [[TMP65]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]]
8768 // CHECK9:       omp_offload.failed15:
8769 // CHECK9-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l407(i64 [[TMP34]], double* [[TMP35]], double* [[TMP36]], double* [[TMP37]]) #[[ATTR2]]
8770 // CHECK9-NEXT:    br label [[OMP_OFFLOAD_CONT16]]
8771 // CHECK9:       omp_offload.cont16:
8772 // CHECK9-NEXT:    [[TMP66:%.*]] = load i32, i32* [[CH]], align 4
8773 // CHECK9-NEXT:    [[CONV17:%.*]] = bitcast i64* [[CH_CASTED]] to i32*
8774 // CHECK9-NEXT:    store i32 [[TMP66]], i32* [[CONV17]], align 4
8775 // CHECK9-NEXT:    [[TMP67:%.*]] = load i64, i64* [[CH_CASTED]], align 8
8776 // CHECK9-NEXT:    [[TMP68:%.*]] = load i32, i32* [[N]], align 4
8777 // CHECK9-NEXT:    [[CONV19:%.*]] = bitcast i64* [[N_CASTED18]] to i32*
8778 // CHECK9-NEXT:    store i32 [[TMP68]], i32* [[CONV19]], align 4
8779 // CHECK9-NEXT:    [[TMP69:%.*]] = load i64, i64* [[N_CASTED18]], align 8
8780 // CHECK9-NEXT:    [[TMP70:%.*]] = load double*, double** [[A]], align 8
8781 // CHECK9-NEXT:    [[TMP71:%.*]] = load double*, double** [[B]], align 8
8782 // CHECK9-NEXT:    [[TMP72:%.*]] = load double*, double** [[C]], align 8
8783 // CHECK9-NEXT:    [[TMP73:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0
8784 // CHECK9-NEXT:    [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i64*
8785 // CHECK9-NEXT:    store i64 [[TMP67]], i64* [[TMP74]], align 8
8786 // CHECK9-NEXT:    [[TMP75:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0
8787 // CHECK9-NEXT:    [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i64*
8788 // CHECK9-NEXT:    store i64 [[TMP67]], i64* [[TMP76]], align 8
8789 // CHECK9-NEXT:    [[TMP77:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0
8790 // CHECK9-NEXT:    store i8* null, i8** [[TMP77]], align 8
8791 // CHECK9-NEXT:    [[TMP78:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1
8792 // CHECK9-NEXT:    [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i64*
8793 // CHECK9-NEXT:    store i64 [[TMP69]], i64* [[TMP79]], align 8
8794 // CHECK9-NEXT:    [[TMP80:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1
8795 // CHECK9-NEXT:    [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i64*
8796 // CHECK9-NEXT:    store i64 [[TMP69]], i64* [[TMP81]], align 8
8797 // CHECK9-NEXT:    [[TMP82:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1
8798 // CHECK9-NEXT:    store i8* null, i8** [[TMP82]], align 8
8799 // CHECK9-NEXT:    [[TMP83:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2
8800 // CHECK9-NEXT:    [[TMP84:%.*]] = bitcast i8** [[TMP83]] to double**
8801 // CHECK9-NEXT:    store double* [[TMP70]], double** [[TMP84]], align 8
8802 // CHECK9-NEXT:    [[TMP85:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2
8803 // CHECK9-NEXT:    [[TMP86:%.*]] = bitcast i8** [[TMP85]] to double**
8804 // CHECK9-NEXT:    store double* [[TMP70]], double** [[TMP86]], align 8
8805 // CHECK9-NEXT:    [[TMP87:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2
8806 // CHECK9-NEXT:    store i8* null, i8** [[TMP87]], align 8
8807 // CHECK9-NEXT:    [[TMP88:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3
8808 // CHECK9-NEXT:    [[TMP89:%.*]] = bitcast i8** [[TMP88]] to double**
8809 // CHECK9-NEXT:    store double* [[TMP71]], double** [[TMP89]], align 8
8810 // CHECK9-NEXT:    [[TMP90:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3
8811 // CHECK9-NEXT:    [[TMP91:%.*]] = bitcast i8** [[TMP90]] to double**
8812 // CHECK9-NEXT:    store double* [[TMP71]], double** [[TMP91]], align 8
8813 // CHECK9-NEXT:    [[TMP92:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 3
8814 // CHECK9-NEXT:    store i8* null, i8** [[TMP92]], align 8
8815 // CHECK9-NEXT:    [[TMP93:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 4
8816 // CHECK9-NEXT:    [[TMP94:%.*]] = bitcast i8** [[TMP93]] to double**
8817 // CHECK9-NEXT:    store double* [[TMP72]], double** [[TMP94]], align 8
8818 // CHECK9-NEXT:    [[TMP95:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 4
8819 // CHECK9-NEXT:    [[TMP96:%.*]] = bitcast i8** [[TMP95]] to double**
8820 // CHECK9-NEXT:    store double* [[TMP72]], double** [[TMP96]], align 8
8821 // CHECK9-NEXT:    [[TMP97:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 4
8822 // CHECK9-NEXT:    store i8* null, i8** [[TMP97]], align 8
8823 // CHECK9-NEXT:    [[TMP98:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0
8824 // CHECK9-NEXT:    [[TMP99:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0
8825 // CHECK9-NEXT:    [[TMP100:%.*]] = load i32, i32* [[N]], align 4
8826 // CHECK9-NEXT:    store i32 [[TMP100]], i32* [[DOTCAPTURE_EXPR_24]], align 4
8827 // CHECK9-NEXT:    [[TMP101:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4
8828 // CHECK9-NEXT:    [[SUB26:%.*]] = sub nsw i32 [[TMP101]], 0
8829 // CHECK9-NEXT:    [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1
8830 // CHECK9-NEXT:    [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1
8831 // CHECK9-NEXT:    store i32 [[SUB28]], i32* [[DOTCAPTURE_EXPR_25]], align 4
8832 // CHECK9-NEXT:    [[TMP102:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4
8833 // CHECK9-NEXT:    [[ADD29:%.*]] = add nsw i32 [[TMP102]], 1
8834 // CHECK9-NEXT:    [[TMP103:%.*]] = zext i32 [[ADD29]] to i64
8835 // CHECK9-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP103]])
8836 // CHECK9-NEXT:    [[TMP104:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l446.region_id, i32 5, i8** [[TMP98]], i8** [[TMP99]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
8837 // CHECK9-NEXT:    [[TMP105:%.*]] = icmp ne i32 [[TMP104]], 0
8838 // CHECK9-NEXT:    br i1 [[TMP105]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]]
8839 // CHECK9:       omp_offload.failed30:
8840 // CHECK9-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l446(i64 [[TMP67]], i64 [[TMP69]], double* [[TMP70]], double* [[TMP71]], double* [[TMP72]]) #[[ATTR2]]
8841 // CHECK9-NEXT:    br label [[OMP_OFFLOAD_CONT31]]
8842 // CHECK9:       omp_offload.cont31:
8843 // CHECK9-NEXT:    [[TMP106:%.*]] = load i32, i32* [[N]], align 4
8844 // CHECK9-NEXT:    [[CONV33:%.*]] = bitcast i64* [[N_CASTED32]] to i32*
8845 // CHECK9-NEXT:    store i32 [[TMP106]], i32* [[CONV33]], align 4
8846 // CHECK9-NEXT:    [[TMP107:%.*]] = load i64, i64* [[N_CASTED32]], align 8
8847 // CHECK9-NEXT:    [[TMP108:%.*]] = load double*, double** [[A]], align 8
8848 // CHECK9-NEXT:    [[TMP109:%.*]] = load double*, double** [[B]], align 8
8849 // CHECK9-NEXT:    [[TMP110:%.*]] = load double*, double** [[C]], align 8
8850 // CHECK9-NEXT:    [[TMP111:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0
8851 // CHECK9-NEXT:    [[TMP112:%.*]] = bitcast i8** [[TMP111]] to i64*
8852 // CHECK9-NEXT:    store i64 [[TMP107]], i64* [[TMP112]], align 8
8853 // CHECK9-NEXT:    [[TMP113:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 0
8854 // CHECK9-NEXT:    [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i64*
8855 // CHECK9-NEXT:    store i64 [[TMP107]], i64* [[TMP114]], align 8
8856 // CHECK9-NEXT:    [[TMP115:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 0
8857 // CHECK9-NEXT:    store i8* null, i8** [[TMP115]], align 8
8858 // CHECK9-NEXT:    [[TMP116:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 1
8859 // CHECK9-NEXT:    [[TMP117:%.*]] = bitcast i8** [[TMP116]] to double**
8860 // CHECK9-NEXT:    store double* [[TMP108]], double** [[TMP117]], align 8
8861 // CHECK9-NEXT:    [[TMP118:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 1
8862 // CHECK9-NEXT:    [[TMP119:%.*]] = bitcast i8** [[TMP118]] to double**
8863 // CHECK9-NEXT:    store double* [[TMP108]], double** [[TMP119]], align 8
8864 // CHECK9-NEXT:    [[TMP120:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 1
8865 // CHECK9-NEXT:    store i8* null, i8** [[TMP120]], align 8
8866 // CHECK9-NEXT:    [[TMP121:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 2
8867 // CHECK9-NEXT:    [[TMP122:%.*]] = bitcast i8** [[TMP121]] to double**
8868 // CHECK9-NEXT:    store double* [[TMP109]], double** [[TMP122]], align 8
8869 // CHECK9-NEXT:    [[TMP123:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 2
8870 // CHECK9-NEXT:    [[TMP124:%.*]] = bitcast i8** [[TMP123]] to double**
8871 // CHECK9-NEXT:    store double* [[TMP109]], double** [[TMP124]], align 8
8872 // CHECK9-NEXT:    [[TMP125:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 2
8873 // CHECK9-NEXT:    store i8* null, i8** [[TMP125]], align 8
8874 // CHECK9-NEXT:    [[TMP126:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 3
8875 // CHECK9-NEXT:    [[TMP127:%.*]] = bitcast i8** [[TMP126]] to double**
8876 // CHECK9-NEXT:    store double* [[TMP110]], double** [[TMP127]], align 8
8877 // CHECK9-NEXT:    [[TMP128:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 3
8878 // CHECK9-NEXT:    [[TMP129:%.*]] = bitcast i8** [[TMP128]] to double**
8879 // CHECK9-NEXT:    store double* [[TMP110]], double** [[TMP129]], align 8
8880 // CHECK9-NEXT:    [[TMP130:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 3
8881 // CHECK9-NEXT:    store i8* null, i8** [[TMP130]], align 8
8882 // CHECK9-NEXT:    [[TMP131:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0
8883 // CHECK9-NEXT:    [[TMP132:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 0
8884 // CHECK9-NEXT:    [[TMP133:%.*]] = load i32, i32* [[N]], align 4
8885 // CHECK9-NEXT:    store i32 [[TMP133]], i32* [[DOTCAPTURE_EXPR_38]], align 4
8886 // CHECK9-NEXT:    [[TMP134:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_38]], align 4
8887 // CHECK9-NEXT:    [[SUB40:%.*]] = sub nsw i32 [[TMP134]], 0
8888 // CHECK9-NEXT:    [[DIV41:%.*]] = sdiv i32 [[SUB40]], 1
8889 // CHECK9-NEXT:    [[SUB42:%.*]] = sub nsw i32 [[DIV41]], 1
8890 // CHECK9-NEXT:    store i32 [[SUB42]], i32* [[DOTCAPTURE_EXPR_39]], align 4
8891 // CHECK9-NEXT:    [[TMP135:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_39]], align 4
8892 // CHECK9-NEXT:    [[ADD43:%.*]] = add nsw i32 [[TMP135]], 1
8893 // CHECK9-NEXT:    [[TMP136:%.*]] = zext i32 [[ADD43]] to i64
8894 // CHECK9-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP136]])
8895 // CHECK9-NEXT:    [[TMP137:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l477.region_id, i32 4, i8** [[TMP131]], i8** [[TMP132]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
8896 // CHECK9-NEXT:    [[TMP138:%.*]] = icmp ne i32 [[TMP137]], 0
8897 // CHECK9-NEXT:    br i1 [[TMP138]], label [[OMP_OFFLOAD_FAILED44:%.*]], label [[OMP_OFFLOAD_CONT45:%.*]]
8898 // CHECK9:       omp_offload.failed44:
8899 // CHECK9-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l477(i64 [[TMP107]], double* [[TMP108]], double* [[TMP109]], double* [[TMP110]]) #[[ATTR2]]
8900 // CHECK9-NEXT:    br label [[OMP_OFFLOAD_CONT45]]
8901 // CHECK9:       omp_offload.cont45:
8902 // CHECK9-NEXT:    [[TMP139:%.*]] = load i32, i32* [[CH]], align 4
8903 // CHECK9-NEXT:    [[CONV47:%.*]] = bitcast i64* [[CH_CASTED46]] to i32*
8904 // CHECK9-NEXT:    store i32 [[TMP139]], i32* [[CONV47]], align 4
8905 // CHECK9-NEXT:    [[TMP140:%.*]] = load i64, i64* [[CH_CASTED46]], align 8
8906 // CHECK9-NEXT:    [[TMP141:%.*]] = load i32, i32* [[N]], align 4
8907 // CHECK9-NEXT:    [[CONV49:%.*]] = bitcast i64* [[N_CASTED48]] to i32*
8908 // CHECK9-NEXT:    store i32 [[TMP141]], i32* [[CONV49]], align 4
8909 // CHECK9-NEXT:    [[TMP142:%.*]] = load i64, i64* [[N_CASTED48]], align 8
8910 // CHECK9-NEXT:    [[TMP143:%.*]] = load double*, double** [[A]], align 8
8911 // CHECK9-NEXT:    [[TMP144:%.*]] = load double*, double** [[B]], align 8
8912 // CHECK9-NEXT:    [[TMP145:%.*]] = load double*, double** [[C]], align 8
8913 // CHECK9-NEXT:    [[TMP146:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0
8914 // CHECK9-NEXT:    [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i64*
8915 // CHECK9-NEXT:    store i64 [[TMP140]], i64* [[TMP147]], align 8
8916 // CHECK9-NEXT:    [[TMP148:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 0
8917 // CHECK9-NEXT:    [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i64*
8918 // CHECK9-NEXT:    store i64 [[TMP140]], i64* [[TMP149]], align 8
8919 // CHECK9-NEXT:    [[TMP150:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 0
8920 // CHECK9-NEXT:    store i8* null, i8** [[TMP150]], align 8
8921 // CHECK9-NEXT:    [[TMP151:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 1
8922 // CHECK9-NEXT:    [[TMP152:%.*]] = bitcast i8** [[TMP151]] to i64*
8923 // CHECK9-NEXT:    store i64 [[TMP142]], i64* [[TMP152]], align 8
8924 // CHECK9-NEXT:    [[TMP153:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 1
8925 // CHECK9-NEXT:    [[TMP154:%.*]] = bitcast i8** [[TMP153]] to i64*
8926 // CHECK9-NEXT:    store i64 [[TMP142]], i64* [[TMP154]], align 8
8927 // CHECK9-NEXT:    [[TMP155:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 1
8928 // CHECK9-NEXT:    store i8* null, i8** [[TMP155]], align 8
8929 // CHECK9-NEXT:    [[TMP156:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 2
8930 // CHECK9-NEXT:    [[TMP157:%.*]] = bitcast i8** [[TMP156]] to double**
8931 // CHECK9-NEXT:    store double* [[TMP143]], double** [[TMP157]], align 8
8932 // CHECK9-NEXT:    [[TMP158:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 2
8933 // CHECK9-NEXT:    [[TMP159:%.*]] = bitcast i8** [[TMP158]] to double**
8934 // CHECK9-NEXT:    store double* [[TMP143]], double** [[TMP159]], align 8
8935 // CHECK9-NEXT:    [[TMP160:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 2
8936 // CHECK9-NEXT:    store i8* null, i8** [[TMP160]], align 8
8937 // CHECK9-NEXT:    [[TMP161:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 3
8938 // CHECK9-NEXT:    [[TMP162:%.*]] = bitcast i8** [[TMP161]] to double**
8939 // CHECK9-NEXT:    store double* [[TMP144]], double** [[TMP162]], align 8
8940 // CHECK9-NEXT:    [[TMP163:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 3
8941 // CHECK9-NEXT:    [[TMP164:%.*]] = bitcast i8** [[TMP163]] to double**
8942 // CHECK9-NEXT:    store double* [[TMP144]], double** [[TMP164]], align 8
8943 // CHECK9-NEXT:    [[TMP165:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 3
8944 // CHECK9-NEXT:    store i8* null, i8** [[TMP165]], align 8
8945 // CHECK9-NEXT:    [[TMP166:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 4
8946 // CHECK9-NEXT:    [[TMP167:%.*]] = bitcast i8** [[TMP166]] to double**
8947 // CHECK9-NEXT:    store double* [[TMP145]], double** [[TMP167]], align 8
8948 // CHECK9-NEXT:    [[TMP168:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 4
8949 // CHECK9-NEXT:    [[TMP169:%.*]] = bitcast i8** [[TMP168]] to double**
8950 // CHECK9-NEXT:    store double* [[TMP145]], double** [[TMP169]], align 8
8951 // CHECK9-NEXT:    [[TMP170:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 4
8952 // CHECK9-NEXT:    store i8* null, i8** [[TMP170]], align 8
8953 // CHECK9-NEXT:    [[TMP171:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0
8954 // CHECK9-NEXT:    [[TMP172:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 0
8955 // CHECK9-NEXT:    [[TMP173:%.*]] = load i32, i32* [[N]], align 4
8956 // CHECK9-NEXT:    store i32 [[TMP173]], i32* [[DOTCAPTURE_EXPR_54]], align 4
8957 // CHECK9-NEXT:    [[TMP174:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_54]], align 4
8958 // CHECK9-NEXT:    [[SUB56:%.*]] = sub nsw i32 [[TMP174]], 0
8959 // CHECK9-NEXT:    [[DIV57:%.*]] = sdiv i32 [[SUB56]], 1
8960 // CHECK9-NEXT:    [[SUB58:%.*]] = sub nsw i32 [[DIV57]], 1
8961 // CHECK9-NEXT:    store i32 [[SUB58]], i32* [[DOTCAPTURE_EXPR_55]], align 4
8962 // CHECK9-NEXT:    [[TMP175:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_55]], align 4
8963 // CHECK9-NEXT:    [[ADD59:%.*]] = add nsw i32 [[TMP175]], 1
8964 // CHECK9-NEXT:    [[TMP176:%.*]] = zext i32 [[ADD59]] to i64
8965 // CHECK9-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP176]])
8966 // CHECK9-NEXT:    [[TMP177:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l505.region_id, i32 5, i8** [[TMP171]], i8** [[TMP172]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
8967 // CHECK9-NEXT:    [[TMP178:%.*]] = icmp ne i32 [[TMP177]], 0
8968 // CHECK9-NEXT:    br i1 [[TMP178]], label [[OMP_OFFLOAD_FAILED60:%.*]], label [[OMP_OFFLOAD_CONT61:%.*]]
8969 // CHECK9:       omp_offload.failed60:
8970 // CHECK9-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l505(i64 [[TMP140]], i64 [[TMP142]], double* [[TMP143]], double* [[TMP144]], double* [[TMP145]]) #[[ATTR2]]
8971 // CHECK9-NEXT:    br label [[OMP_OFFLOAD_CONT61]]
8972 // CHECK9:       omp_offload.cont61:
8973 // CHECK9-NEXT:    [[TMP179:%.*]] = load i32, i32* [[N]], align 4
8974 // CHECK9-NEXT:    [[CONV63:%.*]] = bitcast i64* [[N_CASTED62]] to i32*
8975 // CHECK9-NEXT:    store i32 [[TMP179]], i32* [[CONV63]], align 4
8976 // CHECK9-NEXT:    [[TMP180:%.*]] = load i64, i64* [[N_CASTED62]], align 8
8977 // CHECK9-NEXT:    [[TMP181:%.*]] = load double*, double** [[A]], align 8
8978 // CHECK9-NEXT:    [[TMP182:%.*]] = load double*, double** [[B]], align 8
8979 // CHECK9-NEXT:    [[TMP183:%.*]] = load double*, double** [[C]], align 8
8980 // CHECK9-NEXT:    [[TMP184:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS64]], i32 0, i32 0
8981 // CHECK9-NEXT:    [[TMP185:%.*]] = bitcast i8** [[TMP184]] to i64*
8982 // CHECK9-NEXT:    store i64 [[TMP180]], i64* [[TMP185]], align 8
8983 // CHECK9-NEXT:    [[TMP186:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS65]], i32 0, i32 0
8984 // CHECK9-NEXT:    [[TMP187:%.*]] = bitcast i8** [[TMP186]] to i64*
8985 // CHECK9-NEXT:    store i64 [[TMP180]], i64* [[TMP187]], align 8
8986 // CHECK9-NEXT:    [[TMP188:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS66]], i64 0, i64 0
8987 // CHECK9-NEXT:    store i8* null, i8** [[TMP188]], align 8
8988 // CHECK9-NEXT:    [[TMP189:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS64]], i32 0, i32 1
8989 // CHECK9-NEXT:    [[TMP190:%.*]] = bitcast i8** [[TMP189]] to double**
8990 // CHECK9-NEXT:    store double* [[TMP181]], double** [[TMP190]], align 8
8991 // CHECK9-NEXT:    [[TMP191:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS65]], i32 0, i32 1
8992 // CHECK9-NEXT:    [[TMP192:%.*]] = bitcast i8** [[TMP191]] to double**
8993 // CHECK9-NEXT:    store double* [[TMP181]], double** [[TMP192]], align 8
8994 // CHECK9-NEXT:    [[TMP193:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS66]], i64 0, i64 1
8995 // CHECK9-NEXT:    store i8* null, i8** [[TMP193]], align 8
8996 // CHECK9-NEXT:    [[TMP194:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS64]], i32 0, i32 2
8997 // CHECK9-NEXT:    [[TMP195:%.*]] = bitcast i8** [[TMP194]] to double**
8998 // CHECK9-NEXT:    store double* [[TMP182]], double** [[TMP195]], align 8
8999 // CHECK9-NEXT:    [[TMP196:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS65]], i32 0, i32 2
9000 // CHECK9-NEXT:    [[TMP197:%.*]] = bitcast i8** [[TMP196]] to double**
9001 // CHECK9-NEXT:    store double* [[TMP182]], double** [[TMP197]], align 8
9002 // CHECK9-NEXT:    [[TMP198:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS66]], i64 0, i64 2
9003 // CHECK9-NEXT:    store i8* null, i8** [[TMP198]], align 8
9004 // CHECK9-NEXT:    [[TMP199:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS64]], i32 0, i32 3
9005 // CHECK9-NEXT:    [[TMP200:%.*]] = bitcast i8** [[TMP199]] to double**
9006 // CHECK9-NEXT:    store double* [[TMP183]], double** [[TMP200]], align 8
9007 // CHECK9-NEXT:    [[TMP201:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS65]], i32 0, i32 3
9008 // CHECK9-NEXT:    [[TMP202:%.*]] = bitcast i8** [[TMP201]] to double**
9009 // CHECK9-NEXT:    store double* [[TMP183]], double** [[TMP202]], align 8
9010 // CHECK9-NEXT:    [[TMP203:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS66]], i64 0, i64 3
9011 // CHECK9-NEXT:    store i8* null, i8** [[TMP203]], align 8
9012 // CHECK9-NEXT:    [[TMP204:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS64]], i32 0, i32 0
9013 // CHECK9-NEXT:    [[TMP205:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS65]], i32 0, i32 0
9014 // CHECK9-NEXT:    [[TMP206:%.*]] = load i32, i32* [[N]], align 4
9015 // CHECK9-NEXT:    store i32 [[TMP206]], i32* [[DOTCAPTURE_EXPR_68]], align 4
9016 // CHECK9-NEXT:    [[TMP207:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_68]], align 4
9017 // CHECK9-NEXT:    [[SUB70:%.*]] = sub nsw i32 [[TMP207]], 0
9018 // CHECK9-NEXT:    [[DIV71:%.*]] = sdiv i32 [[SUB70]], 1
9019 // CHECK9-NEXT:    [[SUB72:%.*]] = sub nsw i32 [[DIV71]], 1
9020 // CHECK9-NEXT:    store i32 [[SUB72]], i32* [[DOTCAPTURE_EXPR_69]], align 4
9021 // CHECK9-NEXT:    [[TMP208:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_69]], align 4
9022 // CHECK9-NEXT:    [[ADD73:%.*]] = add nsw i32 [[TMP208]], 1
9023 // CHECK9-NEXT:    [[TMP209:%.*]] = zext i32 [[ADD73]] to i64
9024 // CHECK9-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP209]])
9025 // CHECK9-NEXT:    [[TMP210:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l535.region_id, i32 4, i8** [[TMP204]], i8** [[TMP205]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
9026 // CHECK9-NEXT:    [[TMP211:%.*]] = icmp ne i32 [[TMP210]], 0
9027 // CHECK9-NEXT:    br i1 [[TMP211]], label [[OMP_OFFLOAD_FAILED74:%.*]], label [[OMP_OFFLOAD_CONT75:%.*]]
9028 // CHECK9:       omp_offload.failed74:
9029 // CHECK9-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l535(i64 [[TMP180]], double* [[TMP181]], double* [[TMP182]], double* [[TMP183]]) #[[ATTR2]]
9030 // CHECK9-NEXT:    br label [[OMP_OFFLOAD_CONT75]]
9031 // CHECK9:       omp_offload.cont75:
9032 // CHECK9-NEXT:    [[TMP212:%.*]] = load i32, i32* [[CH]], align 4
9033 // CHECK9-NEXT:    [[CONV77:%.*]] = bitcast i64* [[CH_CASTED76]] to i32*
9034 // CHECK9-NEXT:    store i32 [[TMP212]], i32* [[CONV77]], align 4
9035 // CHECK9-NEXT:    [[TMP213:%.*]] = load i64, i64* [[CH_CASTED76]], align 8
9036 // CHECK9-NEXT:    [[TMP214:%.*]] = load i32, i32* [[N]], align 4
9037 // CHECK9-NEXT:    [[CONV79:%.*]] = bitcast i64* [[N_CASTED78]] to i32*
9038 // CHECK9-NEXT:    store i32 [[TMP214]], i32* [[CONV79]], align 4
9039 // CHECK9-NEXT:    [[TMP215:%.*]] = load i64, i64* [[N_CASTED78]], align 8
9040 // CHECK9-NEXT:    [[TMP216:%.*]] = load double*, double** [[A]], align 8
9041 // CHECK9-NEXT:    [[TMP217:%.*]] = load double*, double** [[B]], align 8
9042 // CHECK9-NEXT:    [[TMP218:%.*]] = load double*, double** [[C]], align 8
9043 // CHECK9-NEXT:    [[TMP219:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 0
9044 // CHECK9-NEXT:    [[TMP220:%.*]] = bitcast i8** [[TMP219]] to i64*
9045 // CHECK9-NEXT:    store i64 [[TMP213]], i64* [[TMP220]], align 8
9046 // CHECK9-NEXT:    [[TMP221:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 0
9047 // CHECK9-NEXT:    [[TMP222:%.*]] = bitcast i8** [[TMP221]] to i64*
9048 // CHECK9-NEXT:    store i64 [[TMP213]], i64* [[TMP222]], align 8
9049 // CHECK9-NEXT:    [[TMP223:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS82]], i64 0, i64 0
9050 // CHECK9-NEXT:    store i8* null, i8** [[TMP223]], align 8
9051 // CHECK9-NEXT:    [[TMP224:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 1
9052 // CHECK9-NEXT:    [[TMP225:%.*]] = bitcast i8** [[TMP224]] to i64*
9053 // CHECK9-NEXT:    store i64 [[TMP215]], i64* [[TMP225]], align 8
9054 // CHECK9-NEXT:    [[TMP226:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 1
9055 // CHECK9-NEXT:    [[TMP227:%.*]] = bitcast i8** [[TMP226]] to i64*
9056 // CHECK9-NEXT:    store i64 [[TMP215]], i64* [[TMP227]], align 8
9057 // CHECK9-NEXT:    [[TMP228:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS82]], i64 0, i64 1
9058 // CHECK9-NEXT:    store i8* null, i8** [[TMP228]], align 8
9059 // CHECK9-NEXT:    [[TMP229:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 2
9060 // CHECK9-NEXT:    [[TMP230:%.*]] = bitcast i8** [[TMP229]] to double**
9061 // CHECK9-NEXT:    store double* [[TMP216]], double** [[TMP230]], align 8
9062 // CHECK9-NEXT:    [[TMP231:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 2
9063 // CHECK9-NEXT:    [[TMP232:%.*]] = bitcast i8** [[TMP231]] to double**
9064 // CHECK9-NEXT:    store double* [[TMP216]], double** [[TMP232]], align 8
9065 // CHECK9-NEXT:    [[TMP233:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS82]], i64 0, i64 2
9066 // CHECK9-NEXT:    store i8* null, i8** [[TMP233]], align 8
9067 // CHECK9-NEXT:    [[TMP234:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 3
9068 // CHECK9-NEXT:    [[TMP235:%.*]] = bitcast i8** [[TMP234]] to double**
9069 // CHECK9-NEXT:    store double* [[TMP217]], double** [[TMP235]], align 8
9070 // CHECK9-NEXT:    [[TMP236:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 3
9071 // CHECK9-NEXT:    [[TMP237:%.*]] = bitcast i8** [[TMP236]] to double**
9072 // CHECK9-NEXT:    store double* [[TMP217]], double** [[TMP237]], align 8
9073 // CHECK9-NEXT:    [[TMP238:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS82]], i64 0, i64 3
9074 // CHECK9-NEXT:    store i8* null, i8** [[TMP238]], align 8
9075 // CHECK9-NEXT:    [[TMP239:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 4
9076 // CHECK9-NEXT:    [[TMP240:%.*]] = bitcast i8** [[TMP239]] to double**
9077 // CHECK9-NEXT:    store double* [[TMP218]], double** [[TMP240]], align 8
9078 // CHECK9-NEXT:    [[TMP241:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 4
9079 // CHECK9-NEXT:    [[TMP242:%.*]] = bitcast i8** [[TMP241]] to double**
9080 // CHECK9-NEXT:    store double* [[TMP218]], double** [[TMP242]], align 8
9081 // CHECK9-NEXT:    [[TMP243:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS82]], i64 0, i64 4
9082 // CHECK9-NEXT:    store i8* null, i8** [[TMP243]], align 8
9083 // CHECK9-NEXT:    [[TMP244:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 0
9084 // CHECK9-NEXT:    [[TMP245:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 0
9085 // CHECK9-NEXT:    [[TMP246:%.*]] = load i32, i32* [[N]], align 4
9086 // CHECK9-NEXT:    store i32 [[TMP246]], i32* [[DOTCAPTURE_EXPR_84]], align 4
9087 // CHECK9-NEXT:    [[TMP247:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_84]], align 4
9088 // CHECK9-NEXT:    [[SUB86:%.*]] = sub nsw i32 [[TMP247]], 0
9089 // CHECK9-NEXT:    [[DIV87:%.*]] = sdiv i32 [[SUB86]], 1
9090 // CHECK9-NEXT:    [[SUB88:%.*]] = sub nsw i32 [[DIV87]], 1
9091 // CHECK9-NEXT:    store i32 [[SUB88]], i32* [[DOTCAPTURE_EXPR_85]], align 4
9092 // CHECK9-NEXT:    [[TMP248:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_85]], align 4
9093 // CHECK9-NEXT:    [[ADD89:%.*]] = add nsw i32 [[TMP248]], 1
9094 // CHECK9-NEXT:    [[TMP249:%.*]] = zext i32 [[ADD89]] to i64
9095 // CHECK9-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP249]])
9096 // CHECK9-NEXT:    [[TMP250:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l561.region_id, i32 5, i8** [[TMP244]], i8** [[TMP245]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.24, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.25, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
9097 // CHECK9-NEXT:    [[TMP251:%.*]] = icmp ne i32 [[TMP250]], 0
9098 // CHECK9-NEXT:    br i1 [[TMP251]], label [[OMP_OFFLOAD_FAILED90:%.*]], label [[OMP_OFFLOAD_CONT91:%.*]]
9099 // CHECK9:       omp_offload.failed90:
9100 // CHECK9-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l561(i64 [[TMP213]], i64 [[TMP215]], double* [[TMP216]], double* [[TMP217]], double* [[TMP218]]) #[[ATTR2]]
9101 // CHECK9-NEXT:    br label [[OMP_OFFLOAD_CONT91]]
9102 // CHECK9:       omp_offload.cont91:
9103 // CHECK9-NEXT:    [[CALL:%.*]] = call signext i32 @_Z5tmainIiET_v()
9104 // CHECK9-NEXT:    ret i32 [[CALL]]
9105 //
9106 //
9107 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l368
9108 // CHECK9-SAME: (i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1:[0-9]+]] {
9109 // CHECK9-NEXT:  entry:
9110 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
9111 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
9112 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
9113 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
9114 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
9115 // CHECK9-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
9116 // CHECK9-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
9117 // CHECK9-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
9118 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
9119 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[CONV]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
9120 // CHECK9-NEXT:    ret void
9121 //
9122 //
9123 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined.
9124 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
9125 // CHECK9-NEXT:  entry:
9126 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
9127 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
9128 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
9129 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
9130 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
9131 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
9132 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
9133 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
9134 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
9135 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
9136 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
9137 // CHECK9-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
9138 // CHECK9-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
9139 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
9140 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
9141 // CHECK9-NEXT:    [[I3:%.*]] = alloca i32, align 4
9142 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
9143 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
9144 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
9145 // CHECK9-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
9146 // CHECK9-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
9147 // CHECK9-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
9148 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
9149 // CHECK9-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
9150 // CHECK9-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
9151 // CHECK9-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
9152 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
9153 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
9154 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9155 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
9156 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
// CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
// CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK9:       omp.precond.then:
// CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK9-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK9-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
// CHECK9-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK9:       cond.true:
// CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK9-NEXT:    br label [[COND_END:%.*]]
// CHECK9:       cond.false:
// CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK9-NEXT:    br label [[COND_END]]
// CHECK9:       cond.end:
// CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
// CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK9-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
// CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK9:       omp.inner.for.cond:
// CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !17
// CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !17
// CHECK9-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
// CHECK9-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK9:       omp.inner.for.body:
// CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !17
// CHECK9-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
// CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !17
// CHECK9-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
// CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !17
// CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK9:       omp.inner.for.inc:
// CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !17
// CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !17
// CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
// CHECK9-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !17
// CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP18:![0-9]+]]
// CHECK9:       omp.inner.for.end:
// CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK9:       omp.loop.exit:
// CHECK9-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
// CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
// CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK9-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
// CHECK9-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK9:       .omp.final.then:
// CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK9-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
// CHECK9-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
// CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
// CHECK9-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
// CHECK9-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
// CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK9:       .omp.final.done:
// CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
// CHECK9:       omp.precond.end:
// CHECK9-NEXT:    ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
// CHECK9-NEXT:  entry:
// CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
// CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
// CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
// CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[I4:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK9-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK9-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
// CHECK9-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
// CHECK9-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
// CHECK9-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
// CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
// CHECK9-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
// CHECK9-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
// CHECK9-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
// CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
// CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
// CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
// CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK9:       omp.precond.then:
// CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
// CHECK9-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK9-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
// CHECK9-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK9-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
// CHECK9-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
// CHECK9-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
// CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK9-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
// CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK9-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
// CHECK9-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK9:       cond.true:
// CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK9-NEXT:    br label [[COND_END:%.*]]
// CHECK9:       cond.false:
// CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK9-NEXT:    br label [[COND_END]]
// CHECK9:       cond.end:
// CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
// CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK9-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
// CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK9:       omp.inner.for.cond:
// CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
// CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !21
// CHECK9-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
// CHECK9-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK9:       omp.inner.for.body:
// CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
// CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
// CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK9-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !21
// CHECK9-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !21
// CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !21
// CHECK9-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
// CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i64 [[IDXPROM]]
// CHECK9-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !21
// CHECK9-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !21
// CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !21
// CHECK9-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
// CHECK9-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM7]]
// CHECK9-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX8]], align 8, !llvm.access.group !21
// CHECK9-NEXT:    [[ADD9:%.*]] = fadd double [[TMP22]], [[TMP25]]
// CHECK9-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !21
// CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !21
// CHECK9-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
// CHECK9-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM10]]
// CHECK9-NEXT:    store double [[ADD9]], double* [[ARRAYIDX11]], align 8, !llvm.access.group !21
// CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK9:       omp.body.continue:
// CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK9:       omp.inner.for.inc:
// CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
// CHECK9-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP28]], 1
// CHECK9-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
// CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]]
// CHECK9:       omp.inner.for.end:
// CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK9:       omp.loop.exit:
// CHECK9-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
// CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
// CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK9-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
// CHECK9-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK9:       .omp.final.then:
// CHECK9-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK9-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP33]], 0
// CHECK9-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
// CHECK9-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
// CHECK9-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
// CHECK9-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
// CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK9:       .omp.final.done:
// CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
// CHECK9:       omp.precond.end:
// CHECK9-NEXT:    ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l407
// CHECK9-SAME: (i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1]] {
// CHECK9-NEXT:  entry:
// CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
// CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
// CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
// CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK9-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
// CHECK9-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
// CHECK9-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
// CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32* [[CONV]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
// CHECK9-NEXT:    ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
// CHECK9-NEXT:  entry:
// CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
// CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
// CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
// CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[I3:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
// CHECK9-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
// CHECK9-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
// CHECK9-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
// CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
// CHECK9-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
// CHECK9-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
// CHECK9-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
// CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
// CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
// CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
// CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK9:       omp.precond.then:
// CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK9-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK9-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
// CHECK9-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK9:       cond.true:
// CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK9-NEXT:    br label [[COND_END:%.*]]
// CHECK9:       cond.false:
// CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK9-NEXT:    br label [[COND_END]]
// CHECK9:       cond.end:
// CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
// CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK9-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
// CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK9:       omp.inner.for.cond:
// CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
// CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26
// CHECK9-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
// CHECK9-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK9:       omp.inner.for.body:
// CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !26
// CHECK9-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
// CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26
// CHECK9-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
// CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !26
// CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK9:       omp.inner.for.inc:
// CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
// CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !26
// CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
// CHECK9-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
// CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]]
// CHECK9:       omp.inner.for.end:
// CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK9:       omp.loop.exit:
// CHECK9-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
// CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
// CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK9-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
// CHECK9-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK9:       .omp.final.then:
// CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK9-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
// CHECK9-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
// CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
// CHECK9-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
// CHECK9-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
// CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK9:       .omp.final.done:
// CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
// CHECK9:       omp.precond.end:
// CHECK9-NEXT:    ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
// CHECK9-NEXT:  entry:
// CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
// CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
// CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
// CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[I4:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK9-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK9-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
// CHECK9-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
// CHECK9-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
// CHECK9-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
// CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
// CHECK9-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
// CHECK9-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
// CHECK9-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
// CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
// CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
// CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
// CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK9:       omp.precond.then:
// CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
// CHECK9-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK9-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
// CHECK9-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK9-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
// CHECK9-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
// CHECK9-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
// CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK9-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
// CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK9-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
// CHECK9-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK9:       cond.true:
// CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK9-NEXT:    br label [[COND_END:%.*]]
// CHECK9:       cond.false:
// CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK9-NEXT:    br label [[COND_END]]
// CHECK9:       cond.end:
// CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
// CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK9-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
// CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK9:       omp.inner.for.cond:
// CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
// CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !29
// CHECK9-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
// CHECK9-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK9:       omp.inner.for.body:
// CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
// CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
// CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK9-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !29
// CHECK9-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !29
// CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !29
// CHECK9-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
// CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i64 [[IDXPROM]]
// CHECK9-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !29
// CHECK9-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !29
// CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !29
// CHECK9-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
// CHECK9-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM7]]
// CHECK9-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX8]], align 8, !llvm.access.group !29
// CHECK9-NEXT:    [[ADD9:%.*]] = fadd double [[TMP22]], [[TMP25]]
// CHECK9-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !29
// CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !29
// CHECK9-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
// CHECK9-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM10]]
// CHECK9-NEXT:    store double [[ADD9]], double* [[ARRAYIDX11]], align 8, !llvm.access.group !29
// CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK9:       omp.body.continue:
// CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK9:       omp.inner.for.inc:
// CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
// CHECK9-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP28]], 1
// CHECK9-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
// CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]]
// CHECK9:       omp.inner.for.end:
// CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK9:       omp.loop.exit:
// CHECK9-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
// CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
// CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK9-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
// CHECK9-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK9:       .omp.final.then:
// CHECK9-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK9-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP33]], 0
// CHECK9-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
// CHECK9-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
// CHECK9-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
// CHECK9-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
// CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK9:       .omp.final.done:
// CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
// CHECK9:       omp.precond.end:
// CHECK9-NEXT:    ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l446
// CHECK9-SAME: (i64 [[CH:%.*]], i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1]] {
// CHECK9-NEXT:  entry:
// CHECK9-NEXT:    [[CH_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
// CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
// CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
// CHECK9-NEXT:    store i64 [[CH]], i64* [[CH_ADDR]], align 8
// CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK9-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
// CHECK9-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
// CHECK9-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
// CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
// CHECK9-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
// CHECK9-NEXT:    ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..6
// CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
// CHECK9-NEXT:  entry:
// CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
// CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
// CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
// CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[I3:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK9-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 8
// CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
// CHECK9-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
// CHECK9-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
// CHECK9-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
// CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
// CHECK9-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
// CHECK9-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 8
// CHECK9-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 8
// CHECK9-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 8
// CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK9-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP6]], 0
// CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
// CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP7]]
// CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK9:       omp.precond.then:
// CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK9-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK9-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK9-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
// CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP9]])
// CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK9-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
// CHECK9-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK9:       cond.true:
// CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK9-NEXT:    br label [[COND_END:%.*]]
// CHECK9:       cond.false:
// CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK9-NEXT:    br label [[COND_END]]
// CHECK9:       cond.end:
// CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
// CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK9-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
// CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK9:       omp.inner.for.cond:
// CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
// CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !32
// CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], 1
// CHECK9-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP17]], [[ADD]]
// CHECK9-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK9:       omp.inner.for.body:
// CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !32
// CHECK9-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
// CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32
// CHECK9-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
// CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]]), !llvm.access.group !32
// CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK9:       omp.inner.for.inc:
// CHECK9-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
// CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !32
// CHECK9-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
// CHECK9-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
// CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !32
// CHECK9-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !32
// CHECK9-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
// CHECK9-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !32
// CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32
// CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !32
// CHECK9-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP27]], [[TMP28]]
// CHECK9-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32
// CHECK9-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32
// CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !32
// CHECK9-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[TMP29]], [[TMP30]]
// CHECK9-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
// CHECK9:       cond.true10:
// CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !32
// CHECK9-NEXT:    br label [[COND_END12:%.*]]
// CHECK9:       cond.false11:
// CHECK9-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32
// CHECK9-NEXT:    br label [[COND_END12]]
// CHECK9:       cond.end12:
// CHECK9-NEXT:    [[COND13:%.*]] = phi i32 [ [[TMP31]], [[COND_TRUE10]] ], [ [[TMP32]], [[COND_FALSE11]] ]
// CHECK9-NEXT:    store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32
// CHECK9-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !32
// CHECK9-NEXT:    store i32 [[TMP33]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
// CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]]
// CHECK9:       omp.inner.for.end:
// CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK9:       omp.loop.exit:
// CHECK9-NEXT:    [[TMP34:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT:    [[TMP35:%.*]] = load i32, i32* [[TMP34]], align 4
// CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP35]])
// CHECK9-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK9-NEXT:    [[TMP37:%.*]] = icmp ne i32 [[TMP36]], 0
// CHECK9-NEXT:    br i1 [[TMP37]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK9:       .omp.final.then:
// CHECK9-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK9-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP38]], 0
// CHECK9-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
// CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV15]], 1
// CHECK9-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL]]
// CHECK9-NEXT:    store i32 [[ADD16]], i32* [[I3]], align 4
// CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK9:       .omp.final.done:
// CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
// CHECK9:       omp.precond.end:
// CHECK9-NEXT:    ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..7
// CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
// CHECK9-NEXT:  entry:
// CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
// CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
// CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
// CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[I4:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK9-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK9-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
// CHECK9-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
// CHECK9-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
// CHECK9-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
// CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
// CHECK9-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
// CHECK9-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
// CHECK9-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
// CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
// CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
// CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
// CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK9:       omp.precond.then:
// CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
// CHECK9-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK9-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
// CHECK9-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK9-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
// CHECK9-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
// CHECK9-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
// CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK9-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
// CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK9-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
// CHECK9-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK9:       cond.true:
// CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK9-NEXT:    br label [[COND_END:%.*]]
// CHECK9:       cond.false:
// CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK9-NEXT:    br label [[COND_END]]
// CHECK9:       cond.end:
// CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
// CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK9-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
// CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK9:       omp.inner.for.cond:
// CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
// CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !35
// CHECK9-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
// CHECK9-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK9:       omp.inner.for.body:
// CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
// CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
// CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK9-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !35
// CHECK9-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !35
// CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !35
// CHECK9-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
// CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i64 [[IDXPROM]]
// CHECK9-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !35
// CHECK9-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !35
// CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !35
// CHECK9-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
// CHECK9-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM7]]
// CHECK9-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX8]], align 8, !llvm.access.group !35
// CHECK9-NEXT:    [[ADD9:%.*]] = fadd double [[TMP22]], [[TMP25]]
// CHECK9-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !35
// CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !35
// CHECK9-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
// CHECK9-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM10]]
// CHECK9-NEXT:    store double [[ADD9]], double* [[ARRAYIDX11]], align 8, !llvm.access.group !35
// CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK9:       omp.body.continue:
// CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK9:       omp.inner.for.inc:
// CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
// CHECK9-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP28]], 1
// CHECK9-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
// CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]]
// CHECK9:       omp.inner.for.end:
// CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK9:       omp.loop.exit:
// CHECK9-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
// CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
// CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK9-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
// CHECK9-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK9:       .omp.final.then:
// CHECK9-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK9-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP33]], 0
// CHECK9-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
// CHECK9-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
// CHECK9-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
// CHECK9-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
// CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK9:       .omp.final.done:
// CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
// CHECK9:       omp.precond.end:
// CHECK9-NEXT:    ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l477
// CHECK9-SAME: (i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1]] {
// CHECK9-NEXT:  entry:
// CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
// CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
// CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
// CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK9-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
// CHECK9-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
// CHECK9-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
// CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32* [[CONV]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
// CHECK9-NEXT:    ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..10
// CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
// CHECK9-NEXT:  entry:
// CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
// CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
// CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
// CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[I3:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
// CHECK9-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
// CHECK9-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
// CHECK9-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
// CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
// CHECK9-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
// CHECK9-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
// CHECK9-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
// CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
// CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
// CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
// CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK9:       omp.precond.then:
// CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK9-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK9-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
// CHECK9-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK9:       cond.true:
// CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK9-NEXT:    br label [[COND_END:%.*]]
// CHECK9:       cond.false:
// CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK9-NEXT:    br label [[COND_END]]
// CHECK9:       cond.end:
// CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
// CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK9-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
// CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK9:       omp.inner.for.cond:
// CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
// CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !38
// CHECK9-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
// CHECK9-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK9:       omp.inner.for.body:
// CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !38
// CHECK9-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
// CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !38
// CHECK9-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
// CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !38
9992 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
9993 // CHECK9:       omp.inner.for.inc:
9994 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
9995 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !38
9996 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
9997 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
9998 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP39:![0-9]+]]
9999 // CHECK9:       omp.inner.for.end:
10000 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
10001 // CHECK9:       omp.loop.exit:
10002 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10003 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
10004 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
10005 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
10006 // CHECK9-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
10007 // CHECK9-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10008 // CHECK9:       .omp.final.then:
10009 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10010 // CHECK9-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
10011 // CHECK9-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
10012 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
10013 // CHECK9-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
10014 // CHECK9-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
10015 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
10016 // CHECK9:       .omp.final.done:
10017 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
10018 // CHECK9:       omp.precond.end:
10019 // CHECK9-NEXT:    ret void
10020 //
10021 //
10022 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..11
10023 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
10024 // CHECK9-NEXT:  entry:
10025 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
10026 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
10027 // CHECK9-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
10028 // CHECK9-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
10029 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
10030 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
10031 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
10032 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
10033 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
10034 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
10035 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
10036 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
10037 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
10038 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
10039 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
10040 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10041 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10042 // CHECK9-NEXT:    [[I4:%.*]] = alloca i32, align 4
10043 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
10044 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
10045 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
10046 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
10047 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
10048 // CHECK9-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
10049 // CHECK9-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
10050 // CHECK9-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
10051 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
10052 // CHECK9-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
10053 // CHECK9-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
10054 // CHECK9-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
10055 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
10056 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
10057 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10058 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
10059 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
10060 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
10061 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
10062 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
10063 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10064 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
10065 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
10066 // CHECK9:       omp.precond.then:
10067 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
10068 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10069 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
10070 // CHECK9-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
10071 // CHECK9-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
10072 // CHECK9-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
10073 // CHECK9-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
10074 // CHECK9-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
10075 // CHECK9-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
10076 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
10077 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
10078 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10079 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
10080 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
10081 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
10082 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10083 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
10084 // CHECK9-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
10085 // CHECK9:       cond.true:
10086 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10087 // CHECK9-NEXT:    br label [[COND_END:%.*]]
10088 // CHECK9:       cond.false:
10089 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
10090 // CHECK9-NEXT:    br label [[COND_END]]
10091 // CHECK9:       cond.end:
10092 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
10093 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
10094 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
10095 // CHECK9-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
10096 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
10097 // CHECK9:       omp.inner.for.cond:
10098 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
10099 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !41
10100 // CHECK9-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
10101 // CHECK9-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10102 // CHECK9:       omp.inner.for.body:
10103 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
10104 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
10105 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
10106 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !41
10107 // CHECK9-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !41
10108 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !41
10109 // CHECK9-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
10110 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i64 [[IDXPROM]]
10111 // CHECK9-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !41
10112 // CHECK9-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !41
10113 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !41
10114 // CHECK9-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
10115 // CHECK9-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM7]]
10116 // CHECK9-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX8]], align 8, !llvm.access.group !41
10117 // CHECK9-NEXT:    [[ADD9:%.*]] = fadd double [[TMP22]], [[TMP25]]
10118 // CHECK9-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !41
10119 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !41
10120 // CHECK9-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
10121 // CHECK9-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM10]]
10122 // CHECK9-NEXT:    store double [[ADD9]], double* [[ARRAYIDX11]], align 8, !llvm.access.group !41
10123 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
10124 // CHECK9:       omp.body.continue:
10125 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
10126 // CHECK9:       omp.inner.for.inc:
10127 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
10128 // CHECK9-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP28]], 1
10129 // CHECK9-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
10130 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP42:![0-9]+]]
10131 // CHECK9:       omp.inner.for.end:
10132 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
10133 // CHECK9:       omp.loop.exit:
10134 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10135 // CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
10136 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
10137 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
10138 // CHECK9-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
10139 // CHECK9-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10140 // CHECK9:       .omp.final.then:
10141 // CHECK9-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10142 // CHECK9-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP33]], 0
10143 // CHECK9-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
10144 // CHECK9-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
10145 // CHECK9-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
10146 // CHECK9-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
10147 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
10148 // CHECK9:       .omp.final.done:
10149 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
10150 // CHECK9:       omp.precond.end:
10151 // CHECK9-NEXT:    ret void
10152 //
10153 //
10154 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l505
10155 // CHECK9-SAME: (i64 [[CH:%.*]], i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1]] {
10156 // CHECK9-NEXT:  entry:
10157 // CHECK9-NEXT:    [[CH_ADDR:%.*]] = alloca i64, align 8
10158 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
10159 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
10160 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
10161 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
10162 // CHECK9-NEXT:    store i64 [[CH]], i64* [[CH_ADDR]], align 8
10163 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
10164 // CHECK9-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
10165 // CHECK9-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
10166 // CHECK9-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
10167 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
10168 // CHECK9-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
10169 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
10170 // CHECK9-NEXT:    ret void
10171 //
10172 //
10173 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..14
10174 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
10175 // CHECK9-NEXT:  entry:
10176 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
10177 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
10178 // CHECK9-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 8
10179 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
10180 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
10181 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
10182 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
10183 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
10184 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
10185 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
10186 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
10187 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
10188 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
10189 // CHECK9-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
10190 // CHECK9-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
10191 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10192 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10193 // CHECK9-NEXT:    [[I4:%.*]] = alloca i32, align 4
10194 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
10195 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
10196 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
10197 // CHECK9-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 8
10198 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
10199 // CHECK9-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
10200 // CHECK9-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
10201 // CHECK9-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
10202 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
10203 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
10204 // CHECK9-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 8
10205 // CHECK9-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 8
10206 // CHECK9-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 8
10207 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
10208 // CHECK9-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
10209 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
10210 // CHECK9-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
10211 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10212 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
10213 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
10214 // CHECK9-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
10215 // CHECK9-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
10216 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
10217 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10218 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
10219 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
10220 // CHECK9:       omp.precond.then:
10221 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
10222 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
10223 // CHECK9-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
10224 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
10225 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
10226 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10227 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
10228 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
10229 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10230 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
10231 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
10232 // CHECK9-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
10233 // CHECK9:       cond.true:
10234 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
10235 // CHECK9-NEXT:    br label [[COND_END:%.*]]
10236 // CHECK9:       cond.false:
10237 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10238 // CHECK9-NEXT:    br label [[COND_END]]
10239 // CHECK9:       cond.end:
10240 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
10241 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
10242 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
10243 // CHECK9-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
10244 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
10245 // CHECK9:       omp.inner.for.cond:
10246 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
10247 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !44
10248 // CHECK9-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
10249 // CHECK9-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10250 // CHECK9:       omp.inner.for.body:
10251 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !44
10252 // CHECK9-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
10253 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !44
10254 // CHECK9-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
10255 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !44
10256 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
10257 // CHECK9-NEXT:    store i32 [[TMP23]], i32* [[CONV]], align 4, !llvm.access.group !44
10258 // CHECK9-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !44
10259 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**, i64)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]], i64 [[TMP24]]), !llvm.access.group !44
10260 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
10261 // CHECK9:       omp.inner.for.inc:
10262 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
10263 // CHECK9-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !44
10264 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
10265 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
10266 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP45:![0-9]+]]
10267 // CHECK9:       omp.inner.for.end:
10268 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
10269 // CHECK9:       omp.loop.exit:
10270 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10271 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
10272 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP28]])
10273 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
10274 // CHECK9-NEXT:    [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
10275 // CHECK9-NEXT:    br i1 [[TMP30]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10276 // CHECK9:       .omp.final.then:
10277 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10278 // CHECK9-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP31]], 0
10279 // CHECK9-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
10280 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
10281 // CHECK9-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
10282 // CHECK9-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
10283 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
10284 // CHECK9:       .omp.final.done:
10285 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
10286 // CHECK9:       omp.precond.end:
10287 // CHECK9-NEXT:    ret void
10288 //
10289 //
10290 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..15
10291 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
10292 // CHECK9-NEXT:  entry:
10293 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
10294 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
10295 // CHECK9-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
10296 // CHECK9-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
10297 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
10298 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
10299 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
10300 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
10301 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
10302 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
10303 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
10304 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
10305 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
10306 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
10307 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
10308 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
10309 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10310 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10311 // CHECK9-NEXT:    [[I6:%.*]] = alloca i32, align 4
10312 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
10313 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
10314 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
10315 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
10316 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
10317 // CHECK9-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
10318 // CHECK9-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
10319 // CHECK9-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
10320 // CHECK9-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
10321 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
10322 // CHECK9-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
10323 // CHECK9-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
10324 // CHECK9-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
10325 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
10326 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
10327 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
10328 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10329 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
10330 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
10331 // CHECK9-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
10332 // CHECK9-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
10333 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
10334 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10335 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
10336 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
10337 // CHECK9:       omp.precond.then:
10338 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
10339 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
10340 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
10341 // CHECK9-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
10342 // CHECK9-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
10343 // CHECK9-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
10344 // CHECK9-NEXT:    [[CONV5:%.*]] = trunc i64 [[TMP9]] to i32
10345 // CHECK9-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_LB]], align 4
10346 // CHECK9-NEXT:    store i32 [[CONV5]], i32* [[DOTOMP_UB]], align 4
10347 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
10348 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
10349 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[CONV]], align 8
10350 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10351 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
10352 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP12]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP10]])
10353 // CHECK9-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
10354 // CHECK9:       omp.dispatch.cond:
10355 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
10356 // CHECK9-NEXT:    [[CONV7:%.*]] = sext i32 [[TMP13]] to i64
10357 // CHECK9-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
10358 // CHECK9-NEXT:    [[CMP8:%.*]] = icmp ugt i64 [[CONV7]], [[TMP14]]
10359 // CHECK9-NEXT:    br i1 [[CMP8]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
10360 // CHECK9:       cond.true:
10361 // CHECK9-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
10362 // CHECK9-NEXT:    br label [[COND_END:%.*]]
10363 // CHECK9:       cond.false:
10364 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
10365 // CHECK9-NEXT:    [[CONV9:%.*]] = sext i32 [[TMP16]] to i64
10366 // CHECK9-NEXT:    br label [[COND_END]]
10367 // CHECK9:       cond.end:
10368 // CHECK9-NEXT:    [[COND:%.*]] = phi i64 [ [[TMP15]], [[COND_TRUE]] ], [ [[CONV9]], [[COND_FALSE]] ]
10369 // CHECK9-NEXT:    [[CONV10:%.*]] = trunc i64 [[COND]] to i32
10370 // CHECK9-NEXT:    store i32 [[CONV10]], i32* [[DOTOMP_UB]], align 4
10371 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
10372 // CHECK9-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
10373 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
10374 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
10375 // CHECK9-NEXT:    [[CMP11:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
10376 // CHECK9-NEXT:    br i1 [[CMP11]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
10377 // CHECK9:       omp.dispatch.body:
10378 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
10379 // CHECK9:       omp.inner.for.cond:
10380 // CHECK9-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
10381 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !47
10382 // CHECK9-NEXT:    [[CMP12:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
10383 // CHECK9-NEXT:    br i1 [[CMP12]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10384 // CHECK9:       omp.inner.for.body:
10385 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
10386 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP22]], 1
10387 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
10388 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[I6]], align 4, !llvm.access.group !47
10389 // CHECK9-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !47
10390 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !47
10391 // CHECK9-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP24]] to i64
10392 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM]]
10393 // CHECK9-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !47
10394 // CHECK9-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !47
10395 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !47
10396 // CHECK9-NEXT:    [[IDXPROM13:%.*]] = sext i32 [[TMP27]] to i64
10397 // CHECK9-NEXT:    [[ARRAYIDX14:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM13]]
10398 // CHECK9-NEXT:    [[TMP28:%.*]] = load double, double* [[ARRAYIDX14]], align 8, !llvm.access.group !47
10399 // CHECK9-NEXT:    [[ADD15:%.*]] = fadd double [[TMP25]], [[TMP28]]
10400 // CHECK9-NEXT:    [[TMP29:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !47
10401 // CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !47
10402 // CHECK9-NEXT:    [[IDXPROM16:%.*]] = sext i32 [[TMP30]] to i64
10403 // CHECK9-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds double, double* [[TMP29]], i64 [[IDXPROM16]]
10404 // CHECK9-NEXT:    store double [[ADD15]], double* [[ARRAYIDX17]], align 8, !llvm.access.group !47
10405 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
10406 // CHECK9:       omp.body.continue:
10407 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
10408 // CHECK9:       omp.inner.for.inc:
10409 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
10410 // CHECK9-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP31]], 1
10411 // CHECK9-NEXT:    store i32 [[ADD18]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
10412 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP48:![0-9]+]]
10413 // CHECK9:       omp.inner.for.end:
10414 // CHECK9-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
10415 // CHECK9:       omp.dispatch.inc:
10416 // CHECK9-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
10417 // CHECK9-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
10418 // CHECK9-NEXT:    [[ADD19:%.*]] = add nsw i32 [[TMP32]], [[TMP33]]
10419 // CHECK9-NEXT:    store i32 [[ADD19]], i32* [[DOTOMP_LB]], align 4
10420 // CHECK9-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
10421 // CHECK9-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
10422 // CHECK9-NEXT:    [[ADD20:%.*]] = add nsw i32 [[TMP34]], [[TMP35]]
10423 // CHECK9-NEXT:    store i32 [[ADD20]], i32* [[DOTOMP_UB]], align 4
10424 // CHECK9-NEXT:    br label [[OMP_DISPATCH_COND]]
10425 // CHECK9:       omp.dispatch.end:
10426 // CHECK9-NEXT:    [[TMP36:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10427 // CHECK9-NEXT:    [[TMP37:%.*]] = load i32, i32* [[TMP36]], align 4
10428 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP37]])
10429 // CHECK9-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
10430 // CHECK9-NEXT:    [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
10431 // CHECK9-NEXT:    br i1 [[TMP39]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10432 // CHECK9:       .omp.final.then:
10433 // CHECK9-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10434 // CHECK9-NEXT:    [[SUB21:%.*]] = sub nsw i32 [[TMP40]], 0
10435 // CHECK9-NEXT:    [[DIV22:%.*]] = sdiv i32 [[SUB21]], 1
10436 // CHECK9-NEXT:    [[MUL23:%.*]] = mul nsw i32 [[DIV22]], 1
10437 // CHECK9-NEXT:    [[ADD24:%.*]] = add nsw i32 0, [[MUL23]]
10438 // CHECK9-NEXT:    store i32 [[ADD24]], i32* [[I6]], align 4
10439 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
10440 // CHECK9:       .omp.final.done:
10441 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
10442 // CHECK9:       omp.precond.end:
10443 // CHECK9-NEXT:    ret void
10444 //
10445 //
10446 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l535
10447 // CHECK9-SAME: (i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1]] {
10448 // CHECK9-NEXT:  entry:
10449 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
10450 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
10451 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
10452 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
10453 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
10454 // CHECK9-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
10455 // CHECK9-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
10456 // CHECK9-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
10457 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
10458 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32* [[CONV]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
10459 // CHECK9-NEXT:    ret void
10460 //
10461 //
10462 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..18
10463 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
10464 // CHECK9-NEXT:  entry:
10465 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
10466 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
10467 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
10468 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
10469 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
10470 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
10471 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
10472 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
10473 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
10474 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
10475 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
10476 // CHECK9-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
10477 // CHECK9-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
10478 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10479 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10480 // CHECK9-NEXT:    [[I3:%.*]] = alloca i32, align 4
10481 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
10482 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
10483 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
10484 // CHECK9-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
10485 // CHECK9-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
10486 // CHECK9-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
10487 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
10488 // CHECK9-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
10489 // CHECK9-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
10490 // CHECK9-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
10491 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
10492 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
10493 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10494 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
10495 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
10496 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
10497 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
10498 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
10499 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10500 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
10501 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
10502 // CHECK9:       omp.precond.then:
10503 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
10504 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10505 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
10506 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
10507 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
10508 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10509 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
10510 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
10511 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10512 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10513 // CHECK9-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
10514 // CHECK9-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
10515 // CHECK9:       cond.true:
10516 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10517 // CHECK9-NEXT:    br label [[COND_END:%.*]]
10518 // CHECK9:       cond.false:
10519 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10520 // CHECK9-NEXT:    br label [[COND_END]]
10521 // CHECK9:       cond.end:
10522 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
10523 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
10524 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
10525 // CHECK9-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
10526 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
10527 // CHECK9:       omp.inner.for.cond:
10528 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
10529 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !50
10530 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
10531 // CHECK9-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10532 // CHECK9:       omp.inner.for.body:
10533 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !50
10534 // CHECK9-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
10535 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !50
10536 // CHECK9-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
10537 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !50
10538 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
10539 // CHECK9:       omp.inner.for.inc:
10540 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
10541 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !50
10542 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
10543 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
10544 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP51:![0-9]+]]
10545 // CHECK9:       omp.inner.for.end:
10546 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
10547 // CHECK9:       omp.loop.exit:
10548 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10549 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
10550 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
10551 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
10552 // CHECK9-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
10553 // CHECK9-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10554 // CHECK9:       .omp.final.then:
10555 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10556 // CHECK9-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
10557 // CHECK9-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
10558 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
10559 // CHECK9-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
10560 // CHECK9-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
10561 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
10562 // CHECK9:       .omp.final.done:
10563 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
10564 // CHECK9:       omp.precond.end:
10565 // CHECK9-NEXT:    ret void
10566 //
10567 //
10568 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..19
10569 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
10570 // CHECK9-NEXT:  entry:
10571 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
10572 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
10573 // CHECK9-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
10574 // CHECK9-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
10575 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
10576 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
10577 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
10578 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
10579 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
10580 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
10581 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
10582 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
10583 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
10584 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
10585 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
10586 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10587 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10588 // CHECK9-NEXT:    [[I4:%.*]] = alloca i32, align 4
10589 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
10590 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
10591 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
10592 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
10593 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
10594 // CHECK9-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
10595 // CHECK9-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
10596 // CHECK9-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
10597 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
10598 // CHECK9-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
10599 // CHECK9-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
10600 // CHECK9-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
10601 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
10602 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
10603 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10604 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
10605 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
10606 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
10607 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
10608 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
10609 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10610 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
10611 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
10612 // CHECK9:       omp.precond.then:
10613 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
10614 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10615 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
10616 // CHECK9-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
10617 // CHECK9-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
10618 // CHECK9-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
10619 // CHECK9-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
10620 // CHECK9-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
10621 // CHECK9-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
10622 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
10623 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
10624 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
10625 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
10626 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10627 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
10628 // CHECK9-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32 35, i32 [[TMP10]], i32 [[TMP11]], i32 1, i32 1)
10629 // CHECK9-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
10630 // CHECK9:       omp.dispatch.cond:
10631 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10632 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4
10633 // CHECK9-NEXT:    [[TMP16:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP15]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
10634 // CHECK9-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP16]], 0
10635 // CHECK9-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
10636 // CHECK9:       omp.dispatch.body:
10637 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
10638 // CHECK9-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
10639 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
10640 // CHECK9:       omp.inner.for.cond:
10641 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
10642 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !53
10643 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
10644 // CHECK9-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10645 // CHECK9:       omp.inner.for.body:
10646 // CHECK9-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
10647 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP20]], 1
10648 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
10649 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !53
10650 // CHECK9-NEXT:    [[TMP21:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !53
10651 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !53
10652 // CHECK9-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP22]] to i64
10653 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP21]], i64 [[IDXPROM]]
10654 // CHECK9-NEXT:    [[TMP23:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !53
10655 // CHECK9-NEXT:    [[TMP24:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !53
10656 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !53
10657 // CHECK9-NEXT:    [[IDXPROM6:%.*]] = sext i32 [[TMP25]] to i64
10658 // CHECK9-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds double, double* [[TMP24]], i64 [[IDXPROM6]]
10659 // CHECK9-NEXT:    [[TMP26:%.*]] = load double, double* [[ARRAYIDX7]], align 8, !llvm.access.group !53
10660 // CHECK9-NEXT:    [[ADD8:%.*]] = fadd double [[TMP23]], [[TMP26]]
10661 // CHECK9-NEXT:    [[TMP27:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !53
10662 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !53
10663 // CHECK9-NEXT:    [[IDXPROM9:%.*]] = sext i32 [[TMP28]] to i64
10664 // CHECK9-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds double, double* [[TMP27]], i64 [[IDXPROM9]]
10665 // CHECK9-NEXT:    store double [[ADD8]], double* [[ARRAYIDX10]], align 8, !llvm.access.group !53
10666 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
10667 // CHECK9:       omp.body.continue:
10668 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
10669 // CHECK9:       omp.inner.for.inc:
10670 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
10671 // CHECK9-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP29]], 1
10672 // CHECK9-NEXT:    store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
10673 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP54:![0-9]+]]
10674 // CHECK9:       omp.inner.for.end:
10675 // CHECK9-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
10676 // CHECK9:       omp.dispatch.inc:
10677 // CHECK9-NEXT:    br label [[OMP_DISPATCH_COND]]
10678 // CHECK9:       omp.dispatch.end:
10679 // CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
10680 // CHECK9-NEXT:    [[TMP31:%.*]] = icmp ne i32 [[TMP30]], 0
10681 // CHECK9-NEXT:    br i1 [[TMP31]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10682 // CHECK9:       .omp.final.then:
10683 // CHECK9-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10684 // CHECK9-NEXT:    [[SUB12:%.*]] = sub nsw i32 [[TMP32]], 0
10685 // CHECK9-NEXT:    [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1
10686 // CHECK9-NEXT:    [[MUL14:%.*]] = mul nsw i32 [[DIV13]], 1
10687 // CHECK9-NEXT:    [[ADD15:%.*]] = add nsw i32 0, [[MUL14]]
10688 // CHECK9-NEXT:    store i32 [[ADD15]], i32* [[I4]], align 4
10689 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
10690 // CHECK9:       .omp.final.done:
10691 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
10692 // CHECK9:       omp.precond.end:
10693 // CHECK9-NEXT:    ret void
10694 //
10695 //
10696 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l561
10697 // CHECK9-SAME: (i64 [[CH:%.*]], i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1]] {
10698 // CHECK9-NEXT:  entry:
10699 // CHECK9-NEXT:    [[CH_ADDR:%.*]] = alloca i64, align 8
10700 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
10701 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
10702 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
10703 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
10704 // CHECK9-NEXT:    store i64 [[CH]], i64* [[CH_ADDR]], align 8
10705 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
10706 // CHECK9-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
10707 // CHECK9-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
10708 // CHECK9-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
10709 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
10710 // CHECK9-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
10711 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
10712 // CHECK9-NEXT:    ret void
10713 //
10714 //
10715 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..22
10716 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
10717 // CHECK9-NEXT:  entry:
10718 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
10719 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
10720 // CHECK9-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 8
10721 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
10722 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
10723 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
10724 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
10725 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
10726 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
10727 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
10728 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
10729 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
10730 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
10731 // CHECK9-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
10732 // CHECK9-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
10733 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10734 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10735 // CHECK9-NEXT:    [[I4:%.*]] = alloca i32, align 4
10736 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
10737 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
10738 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
10739 // CHECK9-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 8
10740 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
10741 // CHECK9-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
10742 // CHECK9-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
10743 // CHECK9-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
10744 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
10745 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
10746 // CHECK9-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 8
10747 // CHECK9-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 8
10748 // CHECK9-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 8
10749 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
10750 // CHECK9-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
10751 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
10752 // CHECK9-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
10753 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10754 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
10755 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
10756 // CHECK9-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
10757 // CHECK9-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
10758 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
10759 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10760 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
10761 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
10762 // CHECK9:       omp.precond.then:
10763 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
10764 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
10765 // CHECK9-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
10766 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
10767 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
10768 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10769 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
10770 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
10771 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10772 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
10773 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
10774 // CHECK9-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
10775 // CHECK9:       cond.true:
10776 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
10777 // CHECK9-NEXT:    br label [[COND_END:%.*]]
10778 // CHECK9:       cond.false:
10779 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10780 // CHECK9-NEXT:    br label [[COND_END]]
10781 // CHECK9:       cond.end:
10782 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
10783 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
10784 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
10785 // CHECK9-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
10786 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
10787 // CHECK9:       omp.inner.for.cond:
10788 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !56
10789 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !56
10790 // CHECK9-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
10791 // CHECK9-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10792 // CHECK9:       omp.inner.for.body:
10793 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !56
10794 // CHECK9-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
10795 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !56
10796 // CHECK9-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
10797 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !56
10798 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
10799 // CHECK9-NEXT:    store i32 [[TMP23]], i32* [[CONV]], align 4, !llvm.access.group !56
10800 // CHECK9-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !56
10801 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**, i64)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]], i64 [[TMP24]]), !llvm.access.group !56
10802 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
10803 // CHECK9:       omp.inner.for.inc:
10804 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !56
10805 // CHECK9-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !56
10806 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
10807 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !56
10808 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP57:![0-9]+]]
10809 // CHECK9:       omp.inner.for.end:
10810 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
10811 // CHECK9:       omp.loop.exit:
10812 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10813 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
10814 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP28]])
10815 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
10816 // CHECK9-NEXT:    [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
10817 // CHECK9-NEXT:    br i1 [[TMP30]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10818 // CHECK9:       .omp.final.then:
10819 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10820 // CHECK9-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP31]], 0
10821 // CHECK9-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
10822 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
10823 // CHECK9-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
10824 // CHECK9-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
10825 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
10826 // CHECK9:       .omp.final.done:
10827 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
10828 // CHECK9:       omp.precond.end:
10829 // CHECK9-NEXT:    ret void
10830 //
10831 //
10832 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..23
10833 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
10834 // CHECK9-NEXT:  entry:
10835 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
10836 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
10837 // CHECK9-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
10838 // CHECK9-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
10839 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
10840 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
10841 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
10842 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
10843 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
10844 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
10845 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
10846 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
10847 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
10848 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
10849 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
10850 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
10851 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10852 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10853 // CHECK9-NEXT:    [[I6:%.*]] = alloca i32, align 4
10854 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
10855 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
10856 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
10857 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
10858 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
10859 // CHECK9-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
10860 // CHECK9-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
10861 // CHECK9-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
10862 // CHECK9-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
10863 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
10864 // CHECK9-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
10865 // CHECK9-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
10866 // CHECK9-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
10867 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
10868 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
10869 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
10870 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10871 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
10872 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
10873 // CHECK9-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
10874 // CHECK9-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
10875 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
10876 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10877 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
10878 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
10879 // CHECK9:       omp.precond.then:
10880 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
10881 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
10882 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
10883 // CHECK9-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
10884 // CHECK9-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
10885 // CHECK9-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
10886 // CHECK9-NEXT:    [[CONV5:%.*]] = trunc i64 [[TMP9]] to i32
10887 // CHECK9-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_LB]], align 4
10888 // CHECK9-NEXT:    store i32 [[CONV5]], i32* [[DOTOMP_UB]], align 4
10889 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
10890 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
10891 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[CONV]], align 8
10892 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
10893 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
10894 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10895 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
10896 // CHECK9-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32 35, i32 [[TMP11]], i32 [[TMP12]], i32 1, i32 [[TMP10]])
10897 // CHECK9-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
10898 // CHECK9:       omp.dispatch.cond:
10899 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10900 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4
10901 // CHECK9-NEXT:    [[TMP17:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP16]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
10902 // CHECK9-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP17]], 0
10903 // CHECK9-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
10904 // CHECK9:       omp.dispatch.body:
10905 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
10906 // CHECK9-NEXT:    store i32 [[TMP18]], i32* [[DOTOMP_IV]], align 4
10907 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
10908 // CHECK9:       omp.inner.for.cond:
10909 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59
10910 // CHECK9-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !59
10911 // CHECK9-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
10912 // CHECK9-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10913 // CHECK9:       omp.inner.for.body:
10914 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59
10915 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP21]], 1
10916 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
10917 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[I6]], align 4, !llvm.access.group !59
10918 // CHECK9-NEXT:    [[TMP22:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !59
10919 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !59
10920 // CHECK9-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP23]] to i64
10921 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP22]], i64 [[IDXPROM]]
10922 // CHECK9-NEXT:    [[TMP24:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !59
10923 // CHECK9-NEXT:    [[TMP25:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !59
10924 // CHECK9-NEXT:    [[TMP26:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !59
10925 // CHECK9-NEXT:    [[IDXPROM8:%.*]] = sext i32 [[TMP26]] to i64
10926 // CHECK9-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds double, double* [[TMP25]], i64 [[IDXPROM8]]
10927 // CHECK9-NEXT:    [[TMP27:%.*]] = load double, double* [[ARRAYIDX9]], align 8, !llvm.access.group !59
10928 // CHECK9-NEXT:    [[ADD10:%.*]] = fadd double [[TMP24]], [[TMP27]]
10929 // CHECK9-NEXT:    [[TMP28:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !59
10930 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !59
10931 // CHECK9-NEXT:    [[IDXPROM11:%.*]] = sext i32 [[TMP29]] to i64
10932 // CHECK9-NEXT:    [[ARRAYIDX12:%.*]] = getelementptr inbounds double, double* [[TMP28]], i64 [[IDXPROM11]]
10933 // CHECK9-NEXT:    store double [[ADD10]], double* [[ARRAYIDX12]], align 8, !llvm.access.group !59
10934 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
10935 // CHECK9:       omp.body.continue:
10936 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
10937 // CHECK9:       omp.inner.for.inc:
10938 // CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59
10939 // CHECK9-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP30]], 1
10940 // CHECK9-NEXT:    store i32 [[ADD13]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59
10941 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP60:![0-9]+]]
10942 // CHECK9:       omp.inner.for.end:
10943 // CHECK9-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
10944 // CHECK9:       omp.dispatch.inc:
10945 // CHECK9-NEXT:    br label [[OMP_DISPATCH_COND]]
10946 // CHECK9:       omp.dispatch.end:
10947 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
10948 // CHECK9-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
10949 // CHECK9-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10950 // CHECK9:       .omp.final.then:
10951 // CHECK9-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10952 // CHECK9-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP33]], 0
10953 // CHECK9-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
10954 // CHECK9-NEXT:    [[MUL16:%.*]] = mul nsw i32 [[DIV15]], 1
10955 // CHECK9-NEXT:    [[ADD17:%.*]] = add nsw i32 0, [[MUL16]]
10956 // CHECK9-NEXT:    store i32 [[ADD17]], i32* [[I6]], align 4
10957 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
10958 // CHECK9:       .omp.final.done:
10959 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
10960 // CHECK9:       omp.precond.end:
10961 // CHECK9-NEXT:    ret void
10962 //
10963 //
10964 // CHECK9-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
10965 // CHECK9-SAME: () #[[ATTR3:[0-9]+]] comdat {
10966 // CHECK9-NEXT:  entry:
10967 // CHECK9-NEXT:    [[A:%.*]] = alloca i32*, align 8
10968 // CHECK9-NEXT:    [[B:%.*]] = alloca i32*, align 8
10969 // CHECK9-NEXT:    [[C:%.*]] = alloca i32*, align 8
10970 // CHECK9-NEXT:    [[N:%.*]] = alloca i32, align 4
10971 // CHECK9-NEXT:    [[CH:%.*]] = alloca i32, align 4
10972 // CHECK9-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
10973 // CHECK9-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 8
10974 // CHECK9-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 8
10975 // CHECK9-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 8
10976 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
10977 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
10978 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
10979 // CHECK9-NEXT:    [[N_CASTED3:%.*]] = alloca i64, align 8
10980 // CHECK9-NEXT:    [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [4 x i8*], align 8
10981 // CHECK9-NEXT:    [[DOTOFFLOAD_PTRS6:%.*]] = alloca [4 x i8*], align 8
10982 // CHECK9-NEXT:    [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [4 x i8*], align 8
10983 // CHECK9-NEXT:    [[_TMP8:%.*]] = alloca i32, align 4
10984 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4
10985 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4
10986 // CHECK9-NEXT:    [[CH_CASTED:%.*]] = alloca i64, align 8
10987 // CHECK9-NEXT:    [[N_CASTED18:%.*]] = alloca i64, align 8
10988 // CHECK9-NEXT:    [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [5 x i8*], align 8
10989 // CHECK9-NEXT:    [[DOTOFFLOAD_PTRS21:%.*]] = alloca [5 x i8*], align 8
10990 // CHECK9-NEXT:    [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [5 x i8*], align 8
10991 // CHECK9-NEXT:    [[_TMP23:%.*]] = alloca i32, align 4
10992 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4
10993 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4
10994 // CHECK9-NEXT:    [[N_CASTED32:%.*]] = alloca i64, align 8
10995 // CHECK9-NEXT:    [[DOTOFFLOAD_BASEPTRS34:%.*]] = alloca [4 x i8*], align 8
10996 // CHECK9-NEXT:    [[DOTOFFLOAD_PTRS35:%.*]] = alloca [4 x i8*], align 8
10997 // CHECK9-NEXT:    [[DOTOFFLOAD_MAPPERS36:%.*]] = alloca [4 x i8*], align 8
10998 // CHECK9-NEXT:    [[_TMP37:%.*]] = alloca i32, align 4
10999 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_38:%.*]] = alloca i32, align 4
11000 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4
11001 // CHECK9-NEXT:    [[CH_CASTED46:%.*]] = alloca i64, align 8
11002 // CHECK9-NEXT:    [[N_CASTED48:%.*]] = alloca i64, align 8
11003 // CHECK9-NEXT:    [[DOTOFFLOAD_BASEPTRS50:%.*]] = alloca [5 x i8*], align 8
11004 // CHECK9-NEXT:    [[DOTOFFLOAD_PTRS51:%.*]] = alloca [5 x i8*], align 8
11005 // CHECK9-NEXT:    [[DOTOFFLOAD_MAPPERS52:%.*]] = alloca [5 x i8*], align 8
11006 // CHECK9-NEXT:    [[_TMP53:%.*]] = alloca i32, align 4
11007 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_54:%.*]] = alloca i32, align 4
11008 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_55:%.*]] = alloca i32, align 4
11009 // CHECK9-NEXT:    [[N_CASTED62:%.*]] = alloca i64, align 8
11010 // CHECK9-NEXT:    [[DOTOFFLOAD_BASEPTRS64:%.*]] = alloca [4 x i8*], align 8
11011 // CHECK9-NEXT:    [[DOTOFFLOAD_PTRS65:%.*]] = alloca [4 x i8*], align 8
11012 // CHECK9-NEXT:    [[DOTOFFLOAD_MAPPERS66:%.*]] = alloca [4 x i8*], align 8
11013 // CHECK9-NEXT:    [[_TMP67:%.*]] = alloca i32, align 4
11014 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_68:%.*]] = alloca i32, align 4
11015 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_69:%.*]] = alloca i32, align 4
11016 // CHECK9-NEXT:    [[CH_CASTED76:%.*]] = alloca i64, align 8
11017 // CHECK9-NEXT:    [[N_CASTED78:%.*]] = alloca i64, align 8
11018 // CHECK9-NEXT:    [[DOTOFFLOAD_BASEPTRS80:%.*]] = alloca [5 x i8*], align 8
11019 // CHECK9-NEXT:    [[DOTOFFLOAD_PTRS81:%.*]] = alloca [5 x i8*], align 8
11020 // CHECK9-NEXT:    [[DOTOFFLOAD_MAPPERS82:%.*]] = alloca [5 x i8*], align 8
11021 // CHECK9-NEXT:    [[_TMP83:%.*]] = alloca i32, align 4
11022 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_84:%.*]] = alloca i32, align 4
11023 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_85:%.*]] = alloca i32, align 4
11024 // CHECK9-NEXT:    store i32 10000, i32* [[N]], align 4
11025 // CHECK9-NEXT:    store i32 100, i32* [[CH]], align 4
11026 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
11027 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_CASTED]] to i32*
11028 // CHECK9-NEXT:    store i32 [[TMP0]], i32* [[CONV]], align 4
11029 // CHECK9-NEXT:    [[TMP1:%.*]] = load i64, i64* [[N_CASTED]], align 8
11030 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[A]], align 8
11031 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[B]], align 8
11032 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[C]], align 8
11033 // CHECK9-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
11034 // CHECK9-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i64*
11035 // CHECK9-NEXT:    store i64 [[TMP1]], i64* [[TMP6]], align 8
11036 // CHECK9-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
11037 // CHECK9-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64*
11038 // CHECK9-NEXT:    store i64 [[TMP1]], i64* [[TMP8]], align 8
11039 // CHECK9-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
11040 // CHECK9-NEXT:    store i8* null, i8** [[TMP9]], align 8
11041 // CHECK9-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
11042 // CHECK9-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i32**
11043 // CHECK9-NEXT:    store i32* [[TMP2]], i32** [[TMP11]], align 8
11044 // CHECK9-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
11045 // CHECK9-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32**
11046 // CHECK9-NEXT:    store i32* [[TMP2]], i32** [[TMP13]], align 8
11047 // CHECK9-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
11048 // CHECK9-NEXT:    store i8* null, i8** [[TMP14]], align 8
11049 // CHECK9-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
11050 // CHECK9-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32**
11051 // CHECK9-NEXT:    store i32* [[TMP3]], i32** [[TMP16]], align 8
11052 // CHECK9-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
11053 // CHECK9-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32**
11054 // CHECK9-NEXT:    store i32* [[TMP3]], i32** [[TMP18]], align 8
11055 // CHECK9-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
11056 // CHECK9-NEXT:    store i8* null, i8** [[TMP19]], align 8
11057 // CHECK9-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
11058 // CHECK9-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32**
11059 // CHECK9-NEXT:    store i32* [[TMP4]], i32** [[TMP21]], align 8
11060 // CHECK9-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
11061 // CHECK9-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32**
11062 // CHECK9-NEXT:    store i32* [[TMP4]], i32** [[TMP23]], align 8
11063 // CHECK9-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
11064 // CHECK9-NEXT:    store i8* null, i8** [[TMP24]], align 8
11065 // CHECK9-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
11066 // CHECK9-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
11067 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[N]], align 4
11068 // CHECK9-NEXT:    store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4
11069 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11070 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0
11071 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
11072 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
11073 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
11074 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11075 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP29]], 1
11076 // CHECK9-NEXT:    [[TMP30:%.*]] = zext i32 [[ADD]] to i64
11077 // CHECK9-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP30]])
11078 // CHECK9-NEXT:    [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l42.region_id, i32 4, i8** [[TMP25]], i8** [[TMP26]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.28, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.29, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
11079 // CHECK9-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
11080 // CHECK9-NEXT:    br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
11081 // CHECK9:       omp_offload.failed:
11082 // CHECK9-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l42(i64 [[TMP1]], i32* [[TMP2]], i32* [[TMP3]], i32* [[TMP4]]) #[[ATTR2]]
11083 // CHECK9-NEXT:    br label [[OMP_OFFLOAD_CONT]]
11084 // CHECK9:       omp_offload.cont:
11085 // CHECK9-NEXT:    [[TMP33:%.*]] = load i32, i32* [[N]], align 4
11086 // CHECK9-NEXT:    [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32*
11087 // CHECK9-NEXT:    store i32 [[TMP33]], i32* [[CONV4]], align 4
11088 // CHECK9-NEXT:    [[TMP34:%.*]] = load i64, i64* [[N_CASTED3]], align 8
11089 // CHECK9-NEXT:    [[TMP35:%.*]] = load i32*, i32** [[A]], align 8
11090 // CHECK9-NEXT:    [[TMP36:%.*]] = load i32*, i32** [[B]], align 8
11091 // CHECK9-NEXT:    [[TMP37:%.*]] = load i32*, i32** [[C]], align 8
11092 // CHECK9-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
11093 // CHECK9-NEXT:    [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64*
11094 // CHECK9-NEXT:    store i64 [[TMP34]], i64* [[TMP39]], align 8
11095 // CHECK9-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
11096 // CHECK9-NEXT:    [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i64*
11097 // CHECK9-NEXT:    store i64 [[TMP34]], i64* [[TMP41]], align 8
11098 // CHECK9-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0
11099 // CHECK9-NEXT:    store i8* null, i8** [[TMP42]], align 8
11100 // CHECK9-NEXT:    [[TMP43:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1
11101 // CHECK9-NEXT:    [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32**
11102 // CHECK9-NEXT:    store i32* [[TMP35]], i32** [[TMP44]], align 8
11103 // CHECK9-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1
11104 // CHECK9-NEXT:    [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32**
11105 // CHECK9-NEXT:    store i32* [[TMP35]], i32** [[TMP46]], align 8
11106 // CHECK9-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1
11107 // CHECK9-NEXT:    store i8* null, i8** [[TMP47]], align 8
11108 // CHECK9-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2
11109 // CHECK9-NEXT:    [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32**
11110 // CHECK9-NEXT:    store i32* [[TMP36]], i32** [[TMP49]], align 8
11111 // CHECK9-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2
11112 // CHECK9-NEXT:    [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32**
11113 // CHECK9-NEXT:    store i32* [[TMP36]], i32** [[TMP51]], align 8
11114 // CHECK9-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2
11115 // CHECK9-NEXT:    store i8* null, i8** [[TMP52]], align 8
11116 // CHECK9-NEXT:    [[TMP53:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 3
11117 // CHECK9-NEXT:    [[TMP54:%.*]] = bitcast i8** [[TMP53]] to i32**
11118 // CHECK9-NEXT:    store i32* [[TMP37]], i32** [[TMP54]], align 8
11119 // CHECK9-NEXT:    [[TMP55:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 3
11120 // CHECK9-NEXT:    [[TMP56:%.*]] = bitcast i8** [[TMP55]] to i32**
11121 // CHECK9-NEXT:    store i32* [[TMP37]], i32** [[TMP56]], align 8
11122 // CHECK9-NEXT:    [[TMP57:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 3
11123 // CHECK9-NEXT:    store i8* null, i8** [[TMP57]], align 8
11124 // CHECK9-NEXT:    [[TMP58:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
11125 // CHECK9-NEXT:    [[TMP59:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
11126 // CHECK9-NEXT:    [[TMP60:%.*]] = load i32, i32* [[N]], align 4
11127 // CHECK9-NEXT:    store i32 [[TMP60]], i32* [[DOTCAPTURE_EXPR_9]], align 4
11128 // CHECK9-NEXT:    [[TMP61:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4
11129 // CHECK9-NEXT:    [[SUB11:%.*]] = sub nsw i32 [[TMP61]], 0
11130 // CHECK9-NEXT:    [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1
11131 // CHECK9-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1
11132 // CHECK9-NEXT:    store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4
11133 // CHECK9-NEXT:    [[TMP62:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4
11134 // CHECK9-NEXT:    [[ADD14:%.*]] = add nsw i32 [[TMP62]], 1
11135 // CHECK9-NEXT:    [[TMP63:%.*]] = zext i32 [[ADD14]] to i64
11136 // CHECK9-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP63]])
11137 // CHECK9-NEXT:    [[TMP64:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l50.region_id, i32 4, i8** [[TMP58]], i8** [[TMP59]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.32, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.33, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
11138 // CHECK9-NEXT:    [[TMP65:%.*]] = icmp ne i32 [[TMP64]], 0
11139 // CHECK9-NEXT:    br i1 [[TMP65]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]]
11140 // CHECK9:       omp_offload.failed15:
11141 // CHECK9-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l50(i64 [[TMP34]], i32* [[TMP35]], i32* [[TMP36]], i32* [[TMP37]]) #[[ATTR2]]
11142 // CHECK9-NEXT:    br label [[OMP_OFFLOAD_CONT16]]
11143 // CHECK9:       omp_offload.cont16:
11144 // CHECK9-NEXT:    [[TMP66:%.*]] = load i32, i32* [[CH]], align 4
11145 // CHECK9-NEXT:    [[CONV17:%.*]] = bitcast i64* [[CH_CASTED]] to i32*
11146 // CHECK9-NEXT:    store i32 [[TMP66]], i32* [[CONV17]], align 4
11147 // CHECK9-NEXT:    [[TMP67:%.*]] = load i64, i64* [[CH_CASTED]], align 8
11148 // CHECK9-NEXT:    [[TMP68:%.*]] = load i32, i32* [[N]], align 4
11149 // CHECK9-NEXT:    [[CONV19:%.*]] = bitcast i64* [[N_CASTED18]] to i32*
11150 // CHECK9-NEXT:    store i32 [[TMP68]], i32* [[CONV19]], align 4
11151 // CHECK9-NEXT:    [[TMP69:%.*]] = load i64, i64* [[N_CASTED18]], align 8
11152 // CHECK9-NEXT:    [[TMP70:%.*]] = load i32*, i32** [[A]], align 8
11153 // CHECK9-NEXT:    [[TMP71:%.*]] = load i32*, i32** [[B]], align 8
11154 // CHECK9-NEXT:    [[TMP72:%.*]] = load i32*, i32** [[C]], align 8
11155 // CHECK9-NEXT:    [[TMP73:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0
11156 // CHECK9-NEXT:    [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i64*
11157 // CHECK9-NEXT:    store i64 [[TMP67]], i64* [[TMP74]], align 8
11158 // CHECK9-NEXT:    [[TMP75:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0
11159 // CHECK9-NEXT:    [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i64*
11160 // CHECK9-NEXT:    store i64 [[TMP67]], i64* [[TMP76]], align 8
11161 // CHECK9-NEXT:    [[TMP77:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0
11162 // CHECK9-NEXT:    store i8* null, i8** [[TMP77]], align 8
11163 // CHECK9-NEXT:    [[TMP78:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1
11164 // CHECK9-NEXT:    [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i64*
11165 // CHECK9-NEXT:    store i64 [[TMP69]], i64* [[TMP79]], align 8
11166 // CHECK9-NEXT:    [[TMP80:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1
11167 // CHECK9-NEXT:    [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i64*
11168 // CHECK9-NEXT:    store i64 [[TMP69]], i64* [[TMP81]], align 8
11169 // CHECK9-NEXT:    [[TMP82:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1
11170 // CHECK9-NEXT:    store i8* null, i8** [[TMP82]], align 8
11171 // CHECK9-NEXT:    [[TMP83:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2
11172 // CHECK9-NEXT:    [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32**
11173 // CHECK9-NEXT:    store i32* [[TMP70]], i32** [[TMP84]], align 8
11174 // CHECK9-NEXT:    [[TMP85:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2
11175 // CHECK9-NEXT:    [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i32**
11176 // CHECK9-NEXT:    store i32* [[TMP70]], i32** [[TMP86]], align 8
11177 // CHECK9-NEXT:    [[TMP87:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2
11178 // CHECK9-NEXT:    store i8* null, i8** [[TMP87]], align 8
11179 // CHECK9-NEXT:    [[TMP88:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3
11180 // CHECK9-NEXT:    [[TMP89:%.*]] = bitcast i8** [[TMP88]] to i32**
11181 // CHECK9-NEXT:    store i32* [[TMP71]], i32** [[TMP89]], align 8
11182 // CHECK9-NEXT:    [[TMP90:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3
11183 // CHECK9-NEXT:    [[TMP91:%.*]] = bitcast i8** [[TMP90]] to i32**
11184 // CHECK9-NEXT:    store i32* [[TMP71]], i32** [[TMP91]], align 8
11185 // CHECK9-NEXT:    [[TMP92:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 3
11186 // CHECK9-NEXT:    store i8* null, i8** [[TMP92]], align 8
11187 // CHECK9-NEXT:    [[TMP93:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 4
11188 // CHECK9-NEXT:    [[TMP94:%.*]] = bitcast i8** [[TMP93]] to i32**
11189 // CHECK9-NEXT:    store i32* [[TMP72]], i32** [[TMP94]], align 8
11190 // CHECK9-NEXT:    [[TMP95:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 4
11191 // CHECK9-NEXT:    [[TMP96:%.*]] = bitcast i8** [[TMP95]] to i32**
11192 // CHECK9-NEXT:    store i32* [[TMP72]], i32** [[TMP96]], align 8
11193 // CHECK9-NEXT:    [[TMP97:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 4
11194 // CHECK9-NEXT:    store i8* null, i8** [[TMP97]], align 8
11195 // CHECK9-NEXT:    [[TMP98:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0
11196 // CHECK9-NEXT:    [[TMP99:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0
11197 // CHECK9-NEXT:    [[TMP100:%.*]] = load i32, i32* [[N]], align 4
11198 // CHECK9-NEXT:    store i32 [[TMP100]], i32* [[DOTCAPTURE_EXPR_24]], align 4
11199 // CHECK9-NEXT:    [[TMP101:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4
11200 // CHECK9-NEXT:    [[SUB26:%.*]] = sub nsw i32 [[TMP101]], 0
11201 // CHECK9-NEXT:    [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1
11202 // CHECK9-NEXT:    [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1
11203 // CHECK9-NEXT:    store i32 [[SUB28]], i32* [[DOTCAPTURE_EXPR_25]], align 4
11204 // CHECK9-NEXT:    [[TMP102:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4
11205 // CHECK9-NEXT:    [[ADD29:%.*]] = add nsw i32 [[TMP102]], 1
11206 // CHECK9-NEXT:    [[TMP103:%.*]] = zext i32 [[ADD29]] to i64
11207 // CHECK9-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP103]])
11208 // CHECK9-NEXT:    [[TMP104:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l58.region_id, i32 5, i8** [[TMP98]], i8** [[TMP99]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.36, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.37, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
11209 // CHECK9-NEXT:    [[TMP105:%.*]] = icmp ne i32 [[TMP104]], 0
11210 // CHECK9-NEXT:    br i1 [[TMP105]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]]
11211 // CHECK9:       omp_offload.failed30:
11212 // CHECK9-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l58(i64 [[TMP67]], i64 [[TMP69]], i32* [[TMP70]], i32* [[TMP71]], i32* [[TMP72]]) #[[ATTR2]]
11213 // CHECK9-NEXT:    br label [[OMP_OFFLOAD_CONT31]]
11214 // CHECK9:       omp_offload.cont31:
11215 // CHECK9-NEXT:    [[TMP106:%.*]] = load i32, i32* [[N]], align 4
11216 // CHECK9-NEXT:    [[CONV33:%.*]] = bitcast i64* [[N_CASTED32]] to i32*
11217 // CHECK9-NEXT:    store i32 [[TMP106]], i32* [[CONV33]], align 4
11218 // CHECK9-NEXT:    [[TMP107:%.*]] = load i64, i64* [[N_CASTED32]], align 8
11219 // CHECK9-NEXT:    [[TMP108:%.*]] = load i32*, i32** [[A]], align 8
11220 // CHECK9-NEXT:    [[TMP109:%.*]] = load i32*, i32** [[B]], align 8
11221 // CHECK9-NEXT:    [[TMP110:%.*]] = load i32*, i32** [[C]], align 8
11222 // CHECK9-NEXT:    [[TMP111:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0
11223 // CHECK9-NEXT:    [[TMP112:%.*]] = bitcast i8** [[TMP111]] to i64*
11224 // CHECK9-NEXT:    store i64 [[TMP107]], i64* [[TMP112]], align 8
11225 // CHECK9-NEXT:    [[TMP113:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 0
11226 // CHECK9-NEXT:    [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i64*
11227 // CHECK9-NEXT:    store i64 [[TMP107]], i64* [[TMP114]], align 8
11228 // CHECK9-NEXT:    [[TMP115:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 0
11229 // CHECK9-NEXT:    store i8* null, i8** [[TMP115]], align 8
11230 // CHECK9-NEXT:    [[TMP116:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 1
11231 // CHECK9-NEXT:    [[TMP117:%.*]] = bitcast i8** [[TMP116]] to i32**
11232 // CHECK9-NEXT:    store i32* [[TMP108]], i32** [[TMP117]], align 8
11233 // CHECK9-NEXT:    [[TMP118:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 1
11234 // CHECK9-NEXT:    [[TMP119:%.*]] = bitcast i8** [[TMP118]] to i32**
11235 // CHECK9-NEXT:    store i32* [[TMP108]], i32** [[TMP119]], align 8
11236 // CHECK9-NEXT:    [[TMP120:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 1
11237 // CHECK9-NEXT:    store i8* null, i8** [[TMP120]], align 8
11238 // CHECK9-NEXT:    [[TMP121:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 2
11239 // CHECK9-NEXT:    [[TMP122:%.*]] = bitcast i8** [[TMP121]] to i32**
11240 // CHECK9-NEXT:    store i32* [[TMP109]], i32** [[TMP122]], align 8
11241 // CHECK9-NEXT:    [[TMP123:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 2
11242 // CHECK9-NEXT:    [[TMP124:%.*]] = bitcast i8** [[TMP123]] to i32**
11243 // CHECK9-NEXT:    store i32* [[TMP109]], i32** [[TMP124]], align 8
11244 // CHECK9-NEXT:    [[TMP125:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 2
11245 // CHECK9-NEXT:    store i8* null, i8** [[TMP125]], align 8
11246 // CHECK9-NEXT:    [[TMP126:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 3
11247 // CHECK9-NEXT:    [[TMP127:%.*]] = bitcast i8** [[TMP126]] to i32**
11248 // CHECK9-NEXT:    store i32* [[TMP110]], i32** [[TMP127]], align 8
11249 // CHECK9-NEXT:    [[TMP128:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 3
11250 // CHECK9-NEXT:    [[TMP129:%.*]] = bitcast i8** [[TMP128]] to i32**
11251 // CHECK9-NEXT:    store i32* [[TMP110]], i32** [[TMP129]], align 8
11252 // CHECK9-NEXT:    [[TMP130:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 3
11253 // CHECK9-NEXT:    store i8* null, i8** [[TMP130]], align 8
11254 // CHECK9-NEXT:    [[TMP131:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0
11255 // CHECK9-NEXT:    [[TMP132:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 0
11256 // CHECK9-NEXT:    [[TMP133:%.*]] = load i32, i32* [[N]], align 4
11257 // CHECK9-NEXT:    store i32 [[TMP133]], i32* [[DOTCAPTURE_EXPR_38]], align 4
11258 // CHECK9-NEXT:    [[TMP134:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_38]], align 4
11259 // CHECK9-NEXT:    [[SUB40:%.*]] = sub nsw i32 [[TMP134]], 0
11260 // CHECK9-NEXT:    [[DIV41:%.*]] = sdiv i32 [[SUB40]], 1
11261 // CHECK9-NEXT:    [[SUB42:%.*]] = sub nsw i32 [[DIV41]], 1
11262 // CHECK9-NEXT:    store i32 [[SUB42]], i32* [[DOTCAPTURE_EXPR_39]], align 4
11263 // CHECK9-NEXT:    [[TMP135:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_39]], align 4
11264 // CHECK9-NEXT:    [[ADD43:%.*]] = add nsw i32 [[TMP135]], 1
11265 // CHECK9-NEXT:    [[TMP136:%.*]] = zext i32 [[ADD43]] to i64
11266 // CHECK9-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP136]])
11267 // CHECK9-NEXT:    [[TMP137:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l66.region_id, i32 4, i8** [[TMP131]], i8** [[TMP132]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.40, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.41, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
11268 // CHECK9-NEXT:    [[TMP138:%.*]] = icmp ne i32 [[TMP137]], 0
11269 // CHECK9-NEXT:    br i1 [[TMP138]], label [[OMP_OFFLOAD_FAILED44:%.*]], label [[OMP_OFFLOAD_CONT45:%.*]]
11270 // CHECK9:       omp_offload.failed44:
11271 // CHECK9-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l66(i64 [[TMP107]], i32* [[TMP108]], i32* [[TMP109]], i32* [[TMP110]]) #[[ATTR2]]
11272 // CHECK9-NEXT:    br label [[OMP_OFFLOAD_CONT45]]
11273 // CHECK9:       omp_offload.cont45:
11274 // CHECK9-NEXT:    [[TMP139:%.*]] = load i32, i32* [[CH]], align 4
11275 // CHECK9-NEXT:    [[CONV47:%.*]] = bitcast i64* [[CH_CASTED46]] to i32*
11276 // CHECK9-NEXT:    store i32 [[TMP139]], i32* [[CONV47]], align 4
11277 // CHECK9-NEXT:    [[TMP140:%.*]] = load i64, i64* [[CH_CASTED46]], align 8
11278 // CHECK9-NEXT:    [[TMP141:%.*]] = load i32, i32* [[N]], align 4
11279 // CHECK9-NEXT:    [[CONV49:%.*]] = bitcast i64* [[N_CASTED48]] to i32*
11280 // CHECK9-NEXT:    store i32 [[TMP141]], i32* [[CONV49]], align 4
11281 // CHECK9-NEXT:    [[TMP142:%.*]] = load i64, i64* [[N_CASTED48]], align 8
11282 // CHECK9-NEXT:    [[TMP143:%.*]] = load i32*, i32** [[A]], align 8
11283 // CHECK9-NEXT:    [[TMP144:%.*]] = load i32*, i32** [[B]], align 8
11284 // CHECK9-NEXT:    [[TMP145:%.*]] = load i32*, i32** [[C]], align 8
11285 // CHECK9-NEXT:    [[TMP146:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0
11286 // CHECK9-NEXT:    [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i64*
11287 // CHECK9-NEXT:    store i64 [[TMP140]], i64* [[TMP147]], align 8
11288 // CHECK9-NEXT:    [[TMP148:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 0
11289 // CHECK9-NEXT:    [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i64*
11290 // CHECK9-NEXT:    store i64 [[TMP140]], i64* [[TMP149]], align 8
11291 // CHECK9-NEXT:    [[TMP150:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 0
11292 // CHECK9-NEXT:    store i8* null, i8** [[TMP150]], align 8
11293 // CHECK9-NEXT:    [[TMP151:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 1
11294 // CHECK9-NEXT:    [[TMP152:%.*]] = bitcast i8** [[TMP151]] to i64*
11295 // CHECK9-NEXT:    store i64 [[TMP142]], i64* [[TMP152]], align 8
11296 // CHECK9-NEXT:    [[TMP153:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 1
11297 // CHECK9-NEXT:    [[TMP154:%.*]] = bitcast i8** [[TMP153]] to i64*
11298 // CHECK9-NEXT:    store i64 [[TMP142]], i64* [[TMP154]], align 8
11299 // CHECK9-NEXT:    [[TMP155:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 1
11300 // CHECK9-NEXT:    store i8* null, i8** [[TMP155]], align 8
11301 // CHECK9-NEXT:    [[TMP156:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 2
11302 // CHECK9-NEXT:    [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i32**
11303 // CHECK9-NEXT:    store i32* [[TMP143]], i32** [[TMP157]], align 8
11304 // CHECK9-NEXT:    [[TMP158:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 2
11305 // CHECK9-NEXT:    [[TMP159:%.*]] = bitcast i8** [[TMP158]] to i32**
11306 // CHECK9-NEXT:    store i32* [[TMP143]], i32** [[TMP159]], align 8
11307 // CHECK9-NEXT:    [[TMP160:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 2
11308 // CHECK9-NEXT:    store i8* null, i8** [[TMP160]], align 8
11309 // CHECK9-NEXT:    [[TMP161:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 3
11310 // CHECK9-NEXT:    [[TMP162:%.*]] = bitcast i8** [[TMP161]] to i32**
11311 // CHECK9-NEXT:    store i32* [[TMP144]], i32** [[TMP162]], align 8
11312 // CHECK9-NEXT:    [[TMP163:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 3
11313 // CHECK9-NEXT:    [[TMP164:%.*]] = bitcast i8** [[TMP163]] to i32**
11314 // CHECK9-NEXT:    store i32* [[TMP144]], i32** [[TMP164]], align 8
11315 // CHECK9-NEXT:    [[TMP165:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 3
11316 // CHECK9-NEXT:    store i8* null, i8** [[TMP165]], align 8
11317 // CHECK9-NEXT:    [[TMP166:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 4
11318 // CHECK9-NEXT:    [[TMP167:%.*]] = bitcast i8** [[TMP166]] to i32**
11319 // CHECK9-NEXT:    store i32* [[TMP145]], i32** [[TMP167]], align 8
11320 // CHECK9-NEXT:    [[TMP168:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 4
11321 // CHECK9-NEXT:    [[TMP169:%.*]] = bitcast i8** [[TMP168]] to i32**
11322 // CHECK9-NEXT:    store i32* [[TMP145]], i32** [[TMP169]], align 8
11323 // CHECK9-NEXT:    [[TMP170:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 4
11324 // CHECK9-NEXT:    store i8* null, i8** [[TMP170]], align 8
11325 // CHECK9-NEXT:    [[TMP171:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0
11326 // CHECK9-NEXT:    [[TMP172:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 0
11327 // CHECK9-NEXT:    [[TMP173:%.*]] = load i32, i32* [[N]], align 4
11328 // CHECK9-NEXT:    store i32 [[TMP173]], i32* [[DOTCAPTURE_EXPR_54]], align 4
11329 // CHECK9-NEXT:    [[TMP174:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_54]], align 4
11330 // CHECK9-NEXT:    [[SUB56:%.*]] = sub nsw i32 [[TMP174]], 0
11331 // CHECK9-NEXT:    [[DIV57:%.*]] = sdiv i32 [[SUB56]], 1
11332 // CHECK9-NEXT:    [[SUB58:%.*]] = sub nsw i32 [[DIV57]], 1
11333 // CHECK9-NEXT:    store i32 [[SUB58]], i32* [[DOTCAPTURE_EXPR_55]], align 4
11334 // CHECK9-NEXT:    [[TMP175:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_55]], align 4
11335 // CHECK9-NEXT:    [[ADD59:%.*]] = add nsw i32 [[TMP175]], 1
11336 // CHECK9-NEXT:    [[TMP176:%.*]] = zext i32 [[ADD59]] to i64
11337 // CHECK9-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP176]])
11338 // CHECK9-NEXT:    [[TMP177:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l74.region_id, i32 5, i8** [[TMP171]], i8** [[TMP172]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.44, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.45, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
11339 // CHECK9-NEXT:    [[TMP178:%.*]] = icmp ne i32 [[TMP177]], 0
11340 // CHECK9-NEXT:    br i1 [[TMP178]], label [[OMP_OFFLOAD_FAILED60:%.*]], label [[OMP_OFFLOAD_CONT61:%.*]]
11341 // CHECK9:       omp_offload.failed60:
11342 // CHECK9-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l74(i64 [[TMP140]], i64 [[TMP142]], i32* [[TMP143]], i32* [[TMP144]], i32* [[TMP145]]) #[[ATTR2]]
11343 // CHECK9-NEXT:    br label [[OMP_OFFLOAD_CONT61]]
11344 // CHECK9:       omp_offload.cont61:
11345 // CHECK9-NEXT:    [[TMP179:%.*]] = load i32, i32* [[N]], align 4
11346 // CHECK9-NEXT:    [[CONV63:%.*]] = bitcast i64* [[N_CASTED62]] to i32*
11347 // CHECK9-NEXT:    store i32 [[TMP179]], i32* [[CONV63]], align 4
11348 // CHECK9-NEXT:    [[TMP180:%.*]] = load i64, i64* [[N_CASTED62]], align 8
11349 // CHECK9-NEXT:    [[TMP181:%.*]] = load i32*, i32** [[A]], align 8
11350 // CHECK9-NEXT:    [[TMP182:%.*]] = load i32*, i32** [[B]], align 8
11351 // CHECK9-NEXT:    [[TMP183:%.*]] = load i32*, i32** [[C]], align 8
11352 // CHECK9-NEXT:    [[TMP184:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS64]], i32 0, i32 0
11353 // CHECK9-NEXT:    [[TMP185:%.*]] = bitcast i8** [[TMP184]] to i64*
11354 // CHECK9-NEXT:    store i64 [[TMP180]], i64* [[TMP185]], align 8
11355 // CHECK9-NEXT:    [[TMP186:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS65]], i32 0, i32 0
11356 // CHECK9-NEXT:    [[TMP187:%.*]] = bitcast i8** [[TMP186]] to i64*
11357 // CHECK9-NEXT:    store i64 [[TMP180]], i64* [[TMP187]], align 8
11358 // CHECK9-NEXT:    [[TMP188:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS66]], i64 0, i64 0
11359 // CHECK9-NEXT:    store i8* null, i8** [[TMP188]], align 8
11360 // CHECK9-NEXT:    [[TMP189:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS64]], i32 0, i32 1
11361 // CHECK9-NEXT:    [[TMP190:%.*]] = bitcast i8** [[TMP189]] to i32**
11362 // CHECK9-NEXT:    store i32* [[TMP181]], i32** [[TMP190]], align 8
11363 // CHECK9-NEXT:    [[TMP191:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS65]], i32 0, i32 1
11364 // CHECK9-NEXT:    [[TMP192:%.*]] = bitcast i8** [[TMP191]] to i32**
11365 // CHECK9-NEXT:    store i32* [[TMP181]], i32** [[TMP192]], align 8
11366 // CHECK9-NEXT:    [[TMP193:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS66]], i64 0, i64 1
11367 // CHECK9-NEXT:    store i8* null, i8** [[TMP193]], align 8
11368 // CHECK9-NEXT:    [[TMP194:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS64]], i32 0, i32 2
11369 // CHECK9-NEXT:    [[TMP195:%.*]] = bitcast i8** [[TMP194]] to i32**
11370 // CHECK9-NEXT:    store i32* [[TMP182]], i32** [[TMP195]], align 8
11371 // CHECK9-NEXT:    [[TMP196:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS65]], i32 0, i32 2
11372 // CHECK9-NEXT:    [[TMP197:%.*]] = bitcast i8** [[TMP196]] to i32**
11373 // CHECK9-NEXT:    store i32* [[TMP182]], i32** [[TMP197]], align 8
11374 // CHECK9-NEXT:    [[TMP198:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS66]], i64 0, i64 2
11375 // CHECK9-NEXT:    store i8* null, i8** [[TMP198]], align 8
11376 // CHECK9-NEXT:    [[TMP199:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS64]], i32 0, i32 3
11377 // CHECK9-NEXT:    [[TMP200:%.*]] = bitcast i8** [[TMP199]] to i32**
11378 // CHECK9-NEXT:    store i32* [[TMP183]], i32** [[TMP200]], align 8
11379 // CHECK9-NEXT:    [[TMP201:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS65]], i32 0, i32 3
11380 // CHECK9-NEXT:    [[TMP202:%.*]] = bitcast i8** [[TMP201]] to i32**
11381 // CHECK9-NEXT:    store i32* [[TMP183]], i32** [[TMP202]], align 8
11382 // CHECK9-NEXT:    [[TMP203:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS66]], i64 0, i64 3
11383 // CHECK9-NEXT:    store i8* null, i8** [[TMP203]], align 8
11384 // CHECK9-NEXT:    [[TMP204:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS64]], i32 0, i32 0
11385 // CHECK9-NEXT:    [[TMP205:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS65]], i32 0, i32 0
11386 // CHECK9-NEXT:    [[TMP206:%.*]] = load i32, i32* [[N]], align 4
11387 // CHECK9-NEXT:    store i32 [[TMP206]], i32* [[DOTCAPTURE_EXPR_68]], align 4
11388 // CHECK9-NEXT:    [[TMP207:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_68]], align 4
11389 // CHECK9-NEXT:    [[SUB70:%.*]] = sub nsw i32 [[TMP207]], 0
11390 // CHECK9-NEXT:    [[DIV71:%.*]] = sdiv i32 [[SUB70]], 1
11391 // CHECK9-NEXT:    [[SUB72:%.*]] = sub nsw i32 [[DIV71]], 1
11392 // CHECK9-NEXT:    store i32 [[SUB72]], i32* [[DOTCAPTURE_EXPR_69]], align 4
11393 // CHECK9-NEXT:    [[TMP208:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_69]], align 4
11394 // CHECK9-NEXT:    [[ADD73:%.*]] = add nsw i32 [[TMP208]], 1
11395 // CHECK9-NEXT:    [[TMP209:%.*]] = zext i32 [[ADD73]] to i64
11396 // CHECK9-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP209]])
11397 // CHECK9-NEXT:    [[TMP210:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l82.region_id, i32 4, i8** [[TMP204]], i8** [[TMP205]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.48, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.49, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
11398 // CHECK9-NEXT:    [[TMP211:%.*]] = icmp ne i32 [[TMP210]], 0
11399 // CHECK9-NEXT:    br i1 [[TMP211]], label [[OMP_OFFLOAD_FAILED74:%.*]], label [[OMP_OFFLOAD_CONT75:%.*]]
11400 // CHECK9:       omp_offload.failed74:
11401 // CHECK9-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l82(i64 [[TMP180]], i32* [[TMP181]], i32* [[TMP182]], i32* [[TMP183]]) #[[ATTR2]]
11402 // CHECK9-NEXT:    br label [[OMP_OFFLOAD_CONT75]]
11403 // CHECK9:       omp_offload.cont75:
11404 // CHECK9-NEXT:    [[TMP212:%.*]] = load i32, i32* [[CH]], align 4
11405 // CHECK9-NEXT:    [[CONV77:%.*]] = bitcast i64* [[CH_CASTED76]] to i32*
11406 // CHECK9-NEXT:    store i32 [[TMP212]], i32* [[CONV77]], align 4
11407 // CHECK9-NEXT:    [[TMP213:%.*]] = load i64, i64* [[CH_CASTED76]], align 8
11408 // CHECK9-NEXT:    [[TMP214:%.*]] = load i32, i32* [[N]], align 4
11409 // CHECK9-NEXT:    [[CONV79:%.*]] = bitcast i64* [[N_CASTED78]] to i32*
11410 // CHECK9-NEXT:    store i32 [[TMP214]], i32* [[CONV79]], align 4
11411 // CHECK9-NEXT:    [[TMP215:%.*]] = load i64, i64* [[N_CASTED78]], align 8
11412 // CHECK9-NEXT:    [[TMP216:%.*]] = load i32*, i32** [[A]], align 8
11413 // CHECK9-NEXT:    [[TMP217:%.*]] = load i32*, i32** [[B]], align 8
11414 // CHECK9-NEXT:    [[TMP218:%.*]] = load i32*, i32** [[C]], align 8
11415 // CHECK9-NEXT:    [[TMP219:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 0
11416 // CHECK9-NEXT:    [[TMP220:%.*]] = bitcast i8** [[TMP219]] to i64*
11417 // CHECK9-NEXT:    store i64 [[TMP213]], i64* [[TMP220]], align 8
11418 // CHECK9-NEXT:    [[TMP221:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 0
11419 // CHECK9-NEXT:    [[TMP222:%.*]] = bitcast i8** [[TMP221]] to i64*
11420 // CHECK9-NEXT:    store i64 [[TMP213]], i64* [[TMP222]], align 8
11421 // CHECK9-NEXT:    [[TMP223:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS82]], i64 0, i64 0
11422 // CHECK9-NEXT:    store i8* null, i8** [[TMP223]], align 8
11423 // CHECK9-NEXT:    [[TMP224:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 1
11424 // CHECK9-NEXT:    [[TMP225:%.*]] = bitcast i8** [[TMP224]] to i64*
11425 // CHECK9-NEXT:    store i64 [[TMP215]], i64* [[TMP225]], align 8
11426 // CHECK9-NEXT:    [[TMP226:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 1
11427 // CHECK9-NEXT:    [[TMP227:%.*]] = bitcast i8** [[TMP226]] to i64*
11428 // CHECK9-NEXT:    store i64 [[TMP215]], i64* [[TMP227]], align 8
11429 // CHECK9-NEXT:    [[TMP228:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS82]], i64 0, i64 1
11430 // CHECK9-NEXT:    store i8* null, i8** [[TMP228]], align 8
11431 // CHECK9-NEXT:    [[TMP229:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 2
11432 // CHECK9-NEXT:    [[TMP230:%.*]] = bitcast i8** [[TMP229]] to i32**
11433 // CHECK9-NEXT:    store i32* [[TMP216]], i32** [[TMP230]], align 8
11434 // CHECK9-NEXT:    [[TMP231:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 2
11435 // CHECK9-NEXT:    [[TMP232:%.*]] = bitcast i8** [[TMP231]] to i32**
11436 // CHECK9-NEXT:    store i32* [[TMP216]], i32** [[TMP232]], align 8
11437 // CHECK9-NEXT:    [[TMP233:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS82]], i64 0, i64 2
11438 // CHECK9-NEXT:    store i8* null, i8** [[TMP233]], align 8
11439 // CHECK9-NEXT:    [[TMP234:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 3
11440 // CHECK9-NEXT:    [[TMP235:%.*]] = bitcast i8** [[TMP234]] to i32**
11441 // CHECK9-NEXT:    store i32* [[TMP217]], i32** [[TMP235]], align 8
11442 // CHECK9-NEXT:    [[TMP236:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 3
11443 // CHECK9-NEXT:    [[TMP237:%.*]] = bitcast i8** [[TMP236]] to i32**
11444 // CHECK9-NEXT:    store i32* [[TMP217]], i32** [[TMP237]], align 8
11445 // CHECK9-NEXT:    [[TMP238:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS82]], i64 0, i64 3
11446 // CHECK9-NEXT:    store i8* null, i8** [[TMP238]], align 8
11447 // CHECK9-NEXT:    [[TMP239:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 4
11448 // CHECK9-NEXT:    [[TMP240:%.*]] = bitcast i8** [[TMP239]] to i32**
11449 // CHECK9-NEXT:    store i32* [[TMP218]], i32** [[TMP240]], align 8
11450 // CHECK9-NEXT:    [[TMP241:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 4
11451 // CHECK9-NEXT:    [[TMP242:%.*]] = bitcast i8** [[TMP241]] to i32**
11452 // CHECK9-NEXT:    store i32* [[TMP218]], i32** [[TMP242]], align 8
11453 // CHECK9-NEXT:    [[TMP243:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS82]], i64 0, i64 4
11454 // CHECK9-NEXT:    store i8* null, i8** [[TMP243]], align 8
11455 // CHECK9-NEXT:    [[TMP244:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 0
11456 // CHECK9-NEXT:    [[TMP245:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 0
11457 // CHECK9-NEXT:    [[TMP246:%.*]] = load i32, i32* [[N]], align 4
11458 // CHECK9-NEXT:    store i32 [[TMP246]], i32* [[DOTCAPTURE_EXPR_84]], align 4
11459 // CHECK9-NEXT:    [[TMP247:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_84]], align 4
11460 // CHECK9-NEXT:    [[SUB86:%.*]] = sub nsw i32 [[TMP247]], 0
11461 // CHECK9-NEXT:    [[DIV87:%.*]] = sdiv i32 [[SUB86]], 1
11462 // CHECK9-NEXT:    [[SUB88:%.*]] = sub nsw i32 [[DIV87]], 1
11463 // CHECK9-NEXT:    store i32 [[SUB88]], i32* [[DOTCAPTURE_EXPR_85]], align 4
11464 // CHECK9-NEXT:    [[TMP248:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_85]], align 4
11465 // CHECK9-NEXT:    [[ADD89:%.*]] = add nsw i32 [[TMP248]], 1
11466 // CHECK9-NEXT:    [[TMP249:%.*]] = zext i32 [[ADD89]] to i64
11467 // CHECK9-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP249]])
11468 // CHECK9-NEXT:    [[TMP250:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l90.region_id, i32 5, i8** [[TMP244]], i8** [[TMP245]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.52, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.53, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
11469 // CHECK9-NEXT:    [[TMP251:%.*]] = icmp ne i32 [[TMP250]], 0
11470 // CHECK9-NEXT:    br i1 [[TMP251]], label [[OMP_OFFLOAD_FAILED90:%.*]], label [[OMP_OFFLOAD_CONT91:%.*]]
11471 // CHECK9:       omp_offload.failed90:
11472 // CHECK9-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l90(i64 [[TMP213]], i64 [[TMP215]], i32* [[TMP216]], i32* [[TMP217]], i32* [[TMP218]]) #[[ATTR2]]
11473 // CHECK9-NEXT:    br label [[OMP_OFFLOAD_CONT91]]
11474 // CHECK9:       omp_offload.cont91:
11475 // CHECK9-NEXT:    ret i32 0
11476 //
11477 //
11478 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l42
11479 // CHECK9-SAME: (i64 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
11480 // CHECK9-NEXT:  entry:
11481 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
11482 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
11483 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 8
11484 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 8
11485 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
11486 // CHECK9-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
11487 // CHECK9-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 8
11488 // CHECK9-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 8
11489 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
11490 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i32* [[CONV]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
11491 // CHECK9-NEXT:    ret void
11492 //
11493 //
11494 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..26
11495 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
11496 // CHECK9-NEXT:  entry:
11497 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
11498 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
11499 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
11500 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
11501 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
11502 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
11503 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
11504 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
11505 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
11506 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
11507 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
11508 // CHECK9-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
11509 // CHECK9-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
11510 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11511 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11512 // CHECK9-NEXT:    [[I3:%.*]] = alloca i32, align 4
11513 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
11514 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
11515 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
11516 // CHECK9-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
11517 // CHECK9-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
11518 // CHECK9-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
11519 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
11520 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
11521 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
11522 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
11523 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
11524 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
11525 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11526 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
11527 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
11528 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
11529 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
11530 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
11531 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11532 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
11533 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
11534 // CHECK9:       omp.precond.then:
11535 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
11536 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11537 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
11538 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
11539 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
11540 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
11541 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
11542 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
11543 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11544 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11545 // CHECK9-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
11546 // CHECK9-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
11547 // CHECK9:       cond.true:
11548 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11549 // CHECK9-NEXT:    br label [[COND_END:%.*]]
11550 // CHECK9:       cond.false:
11551 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11552 // CHECK9-NEXT:    br label [[COND_END]]
11553 // CHECK9:       cond.end:
11554 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
11555 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
11556 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
11557 // CHECK9-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
11558 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
11559 // CHECK9:       omp.inner.for.cond:
11560 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !62
11561 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !62
11562 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
11563 // CHECK9-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11564 // CHECK9:       omp.inner.for.body:
11565 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !62
11566 // CHECK9-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
11567 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !62
11568 // CHECK9-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
11569 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i32**, i32**, i32**)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], i32** [[TMP1]], i32** [[TMP2]], i32** [[TMP3]]), !llvm.access.group !62
11570 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
11571 // CHECK9:       omp.inner.for.inc:
11572 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !62
11573 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !62
11574 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
11575 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !62
11576 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP63:![0-9]+]]
11577 // CHECK9:       omp.inner.for.end:
11578 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
11579 // CHECK9:       omp.loop.exit:
11580 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
11581 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
11582 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
11583 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
11584 // CHECK9-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
11585 // CHECK9-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11586 // CHECK9:       .omp.final.then:
11587 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11588 // CHECK9-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
11589 // CHECK9-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
11590 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
11591 // CHECK9-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
11592 // CHECK9-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
11593 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
11594 // CHECK9:       .omp.final.done:
11595 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
11596 // CHECK9:       omp.precond.end:
11597 // CHECK9-NEXT:    ret void
11598 //
11599 //
11600 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..27
11601 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
11602 // CHECK9-NEXT:  entry:
11603 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
11604 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
11605 // CHECK9-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
11606 // CHECK9-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
11607 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
11608 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
11609 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
11610 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
11611 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
11612 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
11613 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
11614 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
11615 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
11616 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
11617 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
11618 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11619 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11620 // CHECK9-NEXT:    [[I4:%.*]] = alloca i32, align 4
11621 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
11622 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
11623 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
11624 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
11625 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
11626 // CHECK9-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
11627 // CHECK9-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
11628 // CHECK9-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
11629 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
11630 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
11631 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
11632 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
11633 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
11634 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
11635 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11636 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
11637 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
11638 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
11639 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
11640 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
11641 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11642 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
11643 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
11644 // CHECK9:       omp.precond.then:
11645 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
11646 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11647 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
11648 // CHECK9-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
11649 // CHECK9-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
11650 // CHECK9-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
11651 // CHECK9-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
11652 // CHECK9-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
11653 // CHECK9-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
11654 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
11655 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
11656 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
11657 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
11658 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
11659 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
11660 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11661 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
11662 // CHECK9-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
11663 // CHECK9:       cond.true:
11664 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11665 // CHECK9-NEXT:    br label [[COND_END:%.*]]
11666 // CHECK9:       cond.false:
11667 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
11668 // CHECK9-NEXT:    br label [[COND_END]]
11669 // CHECK9:       cond.end:
11670 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
11671 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
11672 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
11673 // CHECK9-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
11674 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
11675 // CHECK9:       omp.inner.for.cond:
11676 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !65
11677 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !65
11678 // CHECK9-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
11679 // CHECK9-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11680 // CHECK9:       omp.inner.for.body:
11681 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !65
11682 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
11683 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
11684 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !65
11685 // CHECK9-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[TMP2]], align 8, !llvm.access.group !65
11686 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !65
11687 // CHECK9-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
11688 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i64 [[IDXPROM]]
11689 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !65
11690 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[TMP3]], align 8, !llvm.access.group !65
11691 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !65
11692 // CHECK9-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
11693 // CHECK9-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i64 [[IDXPROM7]]
11694 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX8]], align 4, !llvm.access.group !65
11695 // CHECK9-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP22]], [[TMP25]]
11696 // CHECK9-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP1]], align 8, !llvm.access.group !65
11697 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !65
11698 // CHECK9-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
11699 // CHECK9-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i64 [[IDXPROM10]]
11700 // CHECK9-NEXT:    store i32 [[ADD9]], i32* [[ARRAYIDX11]], align 4, !llvm.access.group !65
11701 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
11702 // CHECK9:       omp.body.continue:
11703 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
11704 // CHECK9:       omp.inner.for.inc:
11705 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !65
11706 // CHECK9-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP28]], 1
11707 // CHECK9-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !65
11708 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP66:![0-9]+]]
11709 // CHECK9:       omp.inner.for.end:
11710 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
11711 // CHECK9:       omp.loop.exit:
11712 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
11713 // CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
11714 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
11715 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
11716 // CHECK9-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
11717 // CHECK9-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11718 // CHECK9:       .omp.final.then:
11719 // CHECK9-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11720 // CHECK9-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP33]], 0
11721 // CHECK9-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
11722 // CHECK9-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
11723 // CHECK9-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
11724 // CHECK9-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
11725 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
11726 // CHECK9:       .omp.final.done:
11727 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
11728 // CHECK9:       omp.precond.end:
11729 // CHECK9-NEXT:    ret void
11730 //
11731 //
11732 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l50
11733 // CHECK9-SAME: (i64 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
11734 // CHECK9-NEXT:  entry:
11735 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
11736 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
11737 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 8
11738 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 8
11739 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
11740 // CHECK9-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
11741 // CHECK9-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 8
11742 // CHECK9-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 8
11743 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
11744 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i32* [[CONV]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
11745 // CHECK9-NEXT:    ret void
11746 //
11747 //
11748 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..30
11749 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
11750 // CHECK9-NEXT:  entry:
11751 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
11752 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
11753 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
11754 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
11755 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
11756 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
11757 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
11758 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
11759 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
11760 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
11761 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
11762 // CHECK9-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
11763 // CHECK9-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
11764 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11765 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11766 // CHECK9-NEXT:    [[I3:%.*]] = alloca i32, align 4
11767 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
11768 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
11769 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
11770 // CHECK9-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
11771 // CHECK9-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
11772 // CHECK9-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
11773 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
11774 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
11775 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
11776 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
11777 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
11778 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
11779 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11780 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
11781 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
11782 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
11783 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
11784 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
11785 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11786 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
11787 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
11788 // CHECK9:       omp.precond.then:
11789 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
11790 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11791 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
11792 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
11793 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
11794 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
11795 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
11796 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
11797 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11798 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11799 // CHECK9-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
11800 // CHECK9-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
11801 // CHECK9:       cond.true:
11802 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11803 // CHECK9-NEXT:    br label [[COND_END:%.*]]
11804 // CHECK9:       cond.false:
11805 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11806 // CHECK9-NEXT:    br label [[COND_END]]
11807 // CHECK9:       cond.end:
11808 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
11809 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
11810 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
11811 // CHECK9-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
11812 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
11813 // CHECK9:       omp.inner.for.cond:
11814 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !68
11815 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !68
11816 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
11817 // CHECK9-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11818 // CHECK9:       omp.inner.for.body:
11819 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !68
11820 // CHECK9-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
11821 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !68
11822 // CHECK9-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
11823 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i32**, i32**, i32**)* @.omp_outlined..31 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], i32** [[TMP1]], i32** [[TMP2]], i32** [[TMP3]]), !llvm.access.group !68
11824 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
11825 // CHECK9:       omp.inner.for.inc:
11826 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !68
11827 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !68
11828 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
11829 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !68
11830 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP69:![0-9]+]]
11831 // CHECK9:       omp.inner.for.end:
11832 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
11833 // CHECK9:       omp.loop.exit:
11834 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
11835 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
11836 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
11837 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
11838 // CHECK9-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
11839 // CHECK9-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11840 // CHECK9:       .omp.final.then:
11841 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11842 // CHECK9-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
11843 // CHECK9-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
11844 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
11845 // CHECK9-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
11846 // CHECK9-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
11847 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
11848 // CHECK9:       .omp.final.done:
11849 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
11850 // CHECK9:       omp.precond.end:
11851 // CHECK9-NEXT:    ret void
11852 //
11853 //
11854 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..31
11855 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
11856 // CHECK9-NEXT:  entry:
11857 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
11858 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
11859 // CHECK9-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
11860 // CHECK9-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
11861 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
11862 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
11863 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
11864 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
11865 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
11866 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
11867 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
11868 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
11869 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
11870 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
11871 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
11872 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11873 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11874 // CHECK9-NEXT:    [[I4:%.*]] = alloca i32, align 4
11875 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
11876 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
11877 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
11878 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
11879 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
11880 // CHECK9-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
11881 // CHECK9-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
11882 // CHECK9-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
11883 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
11884 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
11885 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
11886 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
11887 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
11888 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
11889 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11890 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
11891 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
11892 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
11893 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
11894 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
11895 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11896 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
11897 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
11898 // CHECK9:       omp.precond.then:
11899 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
11900 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11901 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
11902 // CHECK9-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
11903 // CHECK9-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
11904 // CHECK9-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
11905 // CHECK9-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
11906 // CHECK9-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
11907 // CHECK9-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
11908 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
11909 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
11910 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
11911 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
11912 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
11913 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
11914 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11915 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
11916 // CHECK9-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
11917 // CHECK9:       cond.true:
11918 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11919 // CHECK9-NEXT:    br label [[COND_END:%.*]]
11920 // CHECK9:       cond.false:
11921 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
11922 // CHECK9-NEXT:    br label [[COND_END]]
11923 // CHECK9:       cond.end:
11924 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
11925 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
11926 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
11927 // CHECK9-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
11928 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
11929 // CHECK9:       omp.inner.for.cond:
11930 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !71
11931 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !71
11932 // CHECK9-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
11933 // CHECK9-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11934 // CHECK9:       omp.inner.for.body:
11935 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !71
11936 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
11937 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
11938 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !71
11939 // CHECK9-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[TMP2]], align 8, !llvm.access.group !71
11940 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !71
11941 // CHECK9-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
11942 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i64 [[IDXPROM]]
11943 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !71
11944 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[TMP3]], align 8, !llvm.access.group !71
11945 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !71
11946 // CHECK9-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
11947 // CHECK9-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i64 [[IDXPROM7]]
11948 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX8]], align 4, !llvm.access.group !71
11949 // CHECK9-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP22]], [[TMP25]]
11950 // CHECK9-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP1]], align 8, !llvm.access.group !71
11951 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !71
11952 // CHECK9-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
11953 // CHECK9-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i64 [[IDXPROM10]]
11954 // CHECK9-NEXT:    store i32 [[ADD9]], i32* [[ARRAYIDX11]], align 4, !llvm.access.group !71
11955 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
11956 // CHECK9:       omp.body.continue:
11957 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
11958 // CHECK9:       omp.inner.for.inc:
11959 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !71
11960 // CHECK9-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP28]], 1
11961 // CHECK9-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !71
11962 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP72:![0-9]+]]
11963 // CHECK9:       omp.inner.for.end:
11964 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
11965 // CHECK9:       omp.loop.exit:
11966 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
11967 // CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
11968 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
11969 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
11970 // CHECK9-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
11971 // CHECK9-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11972 // CHECK9:       .omp.final.then:
11973 // CHECK9-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11974 // CHECK9-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP33]], 0
11975 // CHECK9-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
11976 // CHECK9-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
11977 // CHECK9-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
11978 // CHECK9-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
11979 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
11980 // CHECK9:       .omp.final.done:
11981 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
11982 // CHECK9:       omp.precond.end:
11983 // CHECK9-NEXT:    ret void
11984 //
11985 //
11986 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l58
11987 // CHECK9-SAME: (i64 [[CH:%.*]], i64 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
11988 // CHECK9-NEXT:  entry:
11989 // CHECK9-NEXT:    [[CH_ADDR:%.*]] = alloca i64, align 8
11990 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
11991 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
11992 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 8
11993 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 8
11994 // CHECK9-NEXT:    store i64 [[CH]], i64* [[CH_ADDR]], align 8
11995 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
11996 // CHECK9-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
11997 // CHECK9-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 8
11998 // CHECK9-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 8
11999 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
12000 // CHECK9-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
12001 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..34 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
12002 // CHECK9-NEXT:    ret void
12003 //
12004 //
12005 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..34
12006 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
12007 // CHECK9-NEXT:  entry:
12008 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
12009 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
12010 // CHECK9-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 8
12011 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
12012 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
12013 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
12014 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
12015 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
12016 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
12017 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
12018 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
12019 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
12020 // CHECK9-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
12021 // CHECK9-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
12022 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12023 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12024 // CHECK9-NEXT:    [[I3:%.*]] = alloca i32, align 4
12025 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
12026 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
12027 // CHECK9-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 8
12028 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
12029 // CHECK9-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
12030 // CHECK9-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
12031 // CHECK9-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
12032 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
12033 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
12034 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
12035 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
12036 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
12037 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP1]], align 4
12038 // CHECK9-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
12039 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12040 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP6]], 0
12041 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
12042 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
12043 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
12044 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
12045 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12046 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP7]]
12047 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
12048 // CHECK9:       omp.precond.then:
12049 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
12050 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12051 // CHECK9-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_COMB_UB]], align 4
12052 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
12053 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
12054 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP0]], align 4
12055 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12056 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
12057 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP9]])
12058 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
12059 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12060 // CHECK9-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
12061 // CHECK9-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
12062 // CHECK9:       cond.true:
12063 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12064 // CHECK9-NEXT:    br label [[COND_END:%.*]]
12065 // CHECK9:       cond.false:
12066 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
12067 // CHECK9-NEXT:    br label [[COND_END]]
12068 // CHECK9:       cond.end:
12069 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
12070 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
12071 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
12072 // CHECK9-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
12073 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
12074 // CHECK9:       omp.inner.for.cond:
12075 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !74
12076 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !74
12077 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], 1
12078 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP17]], [[ADD]]
12079 // CHECK9-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12080 // CHECK9:       omp.inner.for.body:
12081 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !74
12082 // CHECK9-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
12083 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !74
12084 // CHECK9-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
12085 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i32**, i32**, i32**)* @.omp_outlined..35 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], i32** [[TMP2]], i32** [[TMP3]], i32** [[TMP4]]), !llvm.access.group !74
12086 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
12087 // CHECK9:       omp.inner.for.inc:
12088 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !74
12089 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !74
12090 // CHECK9-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
12091 // CHECK9-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !74
12092 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !74
12093 // CHECK9-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !74
12094 // CHECK9-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
12095 // CHECK9-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !74
12096 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !74
12097 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !74
12098 // CHECK9-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP27]], [[TMP28]]
12099 // CHECK9-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !74
12100 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !74
12101 // CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !74
12102 // CHECK9-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[TMP29]], [[TMP30]]
12103 // CHECK9-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
12104 // CHECK9:       cond.true10:
12105 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !74
12106 // CHECK9-NEXT:    br label [[COND_END12:%.*]]
12107 // CHECK9:       cond.false11:
12108 // CHECK9-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !74
12109 // CHECK9-NEXT:    br label [[COND_END12]]
12110 // CHECK9:       cond.end12:
12111 // CHECK9-NEXT:    [[COND13:%.*]] = phi i32 [ [[TMP31]], [[COND_TRUE10]] ], [ [[TMP32]], [[COND_FALSE11]] ]
12112 // CHECK9-NEXT:    store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !74
12113 // CHECK9-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !74
12114 // CHECK9-NEXT:    store i32 [[TMP33]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !74
12115 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP75:![0-9]+]]
12116 // CHECK9:       omp.inner.for.end:
12117 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
12118 // CHECK9:       omp.loop.exit:
12119 // CHECK9-NEXT:    [[TMP34:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12120 // CHECK9-NEXT:    [[TMP35:%.*]] = load i32, i32* [[TMP34]], align 4
12121 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP35]])
12122 // CHECK9-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
12123 // CHECK9-NEXT:    [[TMP37:%.*]] = icmp ne i32 [[TMP36]], 0
12124 // CHECK9-NEXT:    br i1 [[TMP37]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
12125 // CHECK9:       .omp.final.then:
12126 // CHECK9-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12127 // CHECK9-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP38]], 0
12128 // CHECK9-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
12129 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV15]], 1
12130 // CHECK9-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL]]
12131 // CHECK9-NEXT:    store i32 [[ADD16]], i32* [[I3]], align 4
12132 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
12133 // CHECK9:       .omp.final.done:
12134 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
12135 // CHECK9:       omp.precond.end:
12136 // CHECK9-NEXT:    ret void
12137 //
12138 //
12139 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..35
12140 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
12141 // CHECK9-NEXT:  entry:
12142 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
12143 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
12144 // CHECK9-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
12145 // CHECK9-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
12146 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
12147 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
12148 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
12149 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
12150 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
12151 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
12152 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
12153 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
12154 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
12155 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
12156 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
12157 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12158 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12159 // CHECK9-NEXT:    [[I4:%.*]] = alloca i32, align 4
12160 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
12161 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
12162 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
12163 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
12164 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
12165 // CHECK9-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
12166 // CHECK9-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
12167 // CHECK9-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
12168 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
12169 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
12170 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
12171 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
12172 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
12173 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
12174 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12175 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
12176 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
12177 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
12178 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
12179 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
12180 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12181 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
12182 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
12183 // CHECK9:       omp.precond.then:
12184 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
12185 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12186 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
12187 // CHECK9-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
12188 // CHECK9-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
12189 // CHECK9-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
12190 // CHECK9-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
12191 // CHECK9-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
12192 // CHECK9-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
12193 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
12194 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
12195 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12196 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
12197 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
12198 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12199 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12200 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
12201 // CHECK9-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
12202 // CHECK9:       cond.true:
12203 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12204 // CHECK9-NEXT:    br label [[COND_END:%.*]]
12205 // CHECK9:       cond.false:
12206 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12207 // CHECK9-NEXT:    br label [[COND_END]]
12208 // CHECK9:       cond.end:
12209 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
12210 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
12211 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
12212 // CHECK9-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
12213 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
12214 // CHECK9:       omp.inner.for.cond:
12215 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !77
12216 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !77
12217 // CHECK9-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
12218 // CHECK9-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12219 // CHECK9:       omp.inner.for.body:
12220 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !77
12221 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
12222 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
12223 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !77
12224 // CHECK9-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[TMP2]], align 8, !llvm.access.group !77
12225 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !77
12226 // CHECK9-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
12227 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i64 [[IDXPROM]]
12228 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !77
12229 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[TMP3]], align 8, !llvm.access.group !77
12230 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !77
12231 // CHECK9-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
12232 // CHECK9-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i64 [[IDXPROM7]]
12233 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX8]], align 4, !llvm.access.group !77
12234 // CHECK9-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP22]], [[TMP25]]
12235 // CHECK9-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP1]], align 8, !llvm.access.group !77
12236 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !77
12237 // CHECK9-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
12238 // CHECK9-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i64 [[IDXPROM10]]
12239 // CHECK9-NEXT:    store i32 [[ADD9]], i32* [[ARRAYIDX11]], align 4, !llvm.access.group !77
12240 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
12241 // CHECK9:       omp.body.continue:
12242 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
12243 // CHECK9:       omp.inner.for.inc:
12244 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !77
12245 // CHECK9-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP28]], 1
12246 // CHECK9-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !77
12247 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP78:![0-9]+]]
12248 // CHECK9:       omp.inner.for.end:
12249 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
12250 // CHECK9:       omp.loop.exit:
12251 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12252 // CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
12253 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
12254 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
12255 // CHECK9-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
12256 // CHECK9-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
12257 // CHECK9:       .omp.final.then:
12258 // CHECK9-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12259 // CHECK9-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP33]], 0
12260 // CHECK9-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
12261 // CHECK9-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
12262 // CHECK9-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
12263 // CHECK9-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
12264 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
12265 // CHECK9:       .omp.final.done:
12266 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
12267 // CHECK9:       omp.precond.end:
12268 // CHECK9-NEXT:    ret void
12269 //
12270 //
12271 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l66
12272 // CHECK9-SAME: (i64 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
12273 // CHECK9-NEXT:  entry:
12274 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
12275 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
12276 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 8
12277 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 8
12278 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
12279 // CHECK9-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
12280 // CHECK9-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 8
12281 // CHECK9-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 8
12282 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
12283 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..38 to void (i32*, i32*, ...)*), i32* [[CONV]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
12284 // CHECK9-NEXT:    ret void
12285 //
12286 //
12287 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..38
12288 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
12289 // CHECK9-NEXT:  entry:
12290 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
12291 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
12292 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
12293 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
12294 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
12295 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
12296 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
12297 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
12298 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
12299 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
12300 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
12301 // CHECK9-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
12302 // CHECK9-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
12303 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12304 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12305 // CHECK9-NEXT:    [[I3:%.*]] = alloca i32, align 4
12306 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
12307 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
12308 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
12309 // CHECK9-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
12310 // CHECK9-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
12311 // CHECK9-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
12312 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
12313 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
12314 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
12315 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
12316 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
12317 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
12318 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12319 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
12320 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
12321 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
12322 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
12323 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
12324 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12325 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
12326 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
12327 // CHECK9:       omp.precond.then:
12328 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
12329 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12330 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
12331 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
12332 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
12333 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12334 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
12335 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
12336 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
12337 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12338 // CHECK9-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
12339 // CHECK9-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
12340 // CHECK9:       cond.true:
12341 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12342 // CHECK9-NEXT:    br label [[COND_END:%.*]]
12343 // CHECK9:       cond.false:
12344 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
12345 // CHECK9-NEXT:    br label [[COND_END]]
12346 // CHECK9:       cond.end:
12347 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
12348 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
12349 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
12350 // CHECK9-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
12351 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
12352 // CHECK9:       omp.inner.for.cond:
12353 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !80
12354 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !80
12355 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
12356 // CHECK9-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12357 // CHECK9:       omp.inner.for.body:
12358 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !80
12359 // CHECK9-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
12360 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !80
12361 // CHECK9-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
12362 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i32**, i32**, i32**)* @.omp_outlined..39 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], i32** [[TMP1]], i32** [[TMP2]], i32** [[TMP3]]), !llvm.access.group !80
12363 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
12364 // CHECK9:       omp.inner.for.inc:
12365 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !80
12366 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !80
12367 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
12368 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !80
12369 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP81:![0-9]+]]
12370 // CHECK9:       omp.inner.for.end:
12371 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
12372 // CHECK9:       omp.loop.exit:
12373 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12374 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
12375 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
12376 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
12377 // CHECK9-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
12378 // CHECK9-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
12379 // CHECK9:       .omp.final.then:
12380 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12381 // CHECK9-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
12382 // CHECK9-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
12383 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
12384 // CHECK9-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
12385 // CHECK9-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
12386 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
12387 // CHECK9:       .omp.final.done:
12388 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
12389 // CHECK9:       omp.precond.end:
12390 // CHECK9-NEXT:    ret void
12391 //
12392 //
12393 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..39
12394 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
12395 // CHECK9-NEXT:  entry:
12396 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
12397 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
12398 // CHECK9-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
12399 // CHECK9-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
12400 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
12401 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
12402 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
12403 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
12404 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
12405 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
12406 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
12407 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
12408 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
12409 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
12410 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
12411 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12412 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12413 // CHECK9-NEXT:    [[I4:%.*]] = alloca i32, align 4
12414 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
12415 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
12416 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
12417 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
12418 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
12419 // CHECK9-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
12420 // CHECK9-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
12421 // CHECK9-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
12422 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
12423 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
12424 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
12425 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
12426 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
12427 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
12428 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12429 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
12430 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
12431 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
12432 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
12433 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
12434 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12435 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
12436 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
12437 // CHECK9:       omp.precond.then:
12438 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
12439 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12440 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
12441 // CHECK9-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
12442 // CHECK9-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
12443 // CHECK9-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
12444 // CHECK9-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
12445 // CHECK9-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
12446 // CHECK9-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
12447 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
12448 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
12449 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12450 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
12451 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
12452 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12453 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12454 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
12455 // CHECK9-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
12456 // CHECK9:       cond.true:
12457 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12458 // CHECK9-NEXT:    br label [[COND_END:%.*]]
12459 // CHECK9:       cond.false:
12460 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12461 // CHECK9-NEXT:    br label [[COND_END]]
12462 // CHECK9:       cond.end:
12463 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
12464 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
12465 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
12466 // CHECK9-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
12467 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
12468 // CHECK9:       omp.inner.for.cond:
12469 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !83
12470 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !83
12471 // CHECK9-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
12472 // CHECK9-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12473 // CHECK9:       omp.inner.for.body:
12474 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !83
12475 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
12476 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
12477 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !83
12478 // CHECK9-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[TMP2]], align 8, !llvm.access.group !83
12479 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !83
12480 // CHECK9-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
12481 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i64 [[IDXPROM]]
12482 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !83
12483 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[TMP3]], align 8, !llvm.access.group !83
12484 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !83
12485 // CHECK9-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
12486 // CHECK9-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i64 [[IDXPROM7]]
12487 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX8]], align 4, !llvm.access.group !83
12488 // CHECK9-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP22]], [[TMP25]]
12489 // CHECK9-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP1]], align 8, !llvm.access.group !83
12490 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !83
12491 // CHECK9-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
12492 // CHECK9-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i64 [[IDXPROM10]]
12493 // CHECK9-NEXT:    store i32 [[ADD9]], i32* [[ARRAYIDX11]], align 4, !llvm.access.group !83
12494 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
12495 // CHECK9:       omp.body.continue:
12496 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
12497 // CHECK9:       omp.inner.for.inc:
12498 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !83
12499 // CHECK9-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP28]], 1
12500 // CHECK9-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !83
12501 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP84:![0-9]+]]
12502 // CHECK9:       omp.inner.for.end:
12503 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
12504 // CHECK9:       omp.loop.exit:
12505 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12506 // CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
12507 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
12508 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
12509 // CHECK9-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
12510 // CHECK9-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
12511 // CHECK9:       .omp.final.then:
12512 // CHECK9-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12513 // CHECK9-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP33]], 0
12514 // CHECK9-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
12515 // CHECK9-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
12516 // CHECK9-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
12517 // CHECK9-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
12518 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
12519 // CHECK9:       .omp.final.done:
12520 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
12521 // CHECK9:       omp.precond.end:
12522 // CHECK9-NEXT:    ret void
12523 //
12524 //
12525 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l74
12526 // CHECK9-SAME: (i64 [[CH:%.*]], i64 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
12527 // CHECK9-NEXT:  entry:
12528 // CHECK9-NEXT:    [[CH_ADDR:%.*]] = alloca i64, align 8
12529 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
12530 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
12531 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 8
12532 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 8
12533 // CHECK9-NEXT:    store i64 [[CH]], i64* [[CH_ADDR]], align 8
12534 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
12535 // CHECK9-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
12536 // CHECK9-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 8
12537 // CHECK9-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 8
12538 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
12539 // CHECK9-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
12540 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..42 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
12541 // CHECK9-NEXT:    ret void
12542 //
12543 //
12544 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..42
12545 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
12546 // CHECK9-NEXT:  entry:
12547 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
12548 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
12549 // CHECK9-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 8
12550 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
12551 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
12552 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
12553 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
12554 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
12555 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
12556 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
12557 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
12558 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
12559 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
12560 // CHECK9-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
12561 // CHECK9-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
12562 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12563 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12564 // CHECK9-NEXT:    [[I4:%.*]] = alloca i32, align 4
12565 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
12566 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
12567 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
12568 // CHECK9-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 8
12569 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
12570 // CHECK9-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
12571 // CHECK9-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
12572 // CHECK9-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
12573 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
12574 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
12575 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
12576 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
12577 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
12578 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
12579 // CHECK9-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
12580 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
12581 // CHECK9-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
12582 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12583 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
12584 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
12585 // CHECK9-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
12586 // CHECK9-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
12587 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
12588 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12589 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
12590 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
12591 // CHECK9:       omp.precond.then:
12592 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
12593 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
12594 // CHECK9-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
12595 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
12596 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
12597 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12598 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
12599 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
12600 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
12601 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
12602 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
12603 // CHECK9-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
12604 // CHECK9:       cond.true:
12605 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
12606 // CHECK9-NEXT:    br label [[COND_END:%.*]]
12607 // CHECK9:       cond.false:
12608 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
12609 // CHECK9-NEXT:    br label [[COND_END]]
12610 // CHECK9:       cond.end:
12611 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
12612 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
12613 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
12614 // CHECK9-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
12615 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
12616 // CHECK9:       omp.inner.for.cond:
12617 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !86
12618 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !86
12619 // CHECK9-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
12620 // CHECK9-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12621 // CHECK9:       omp.inner.for.body:
12622 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !86
12623 // CHECK9-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
12624 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !86
12625 // CHECK9-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
12626 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !86
12627 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
12628 // CHECK9-NEXT:    store i32 [[TMP23]], i32* [[CONV]], align 4, !llvm.access.group !86
12629 // CHECK9-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !86
12630 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i32**, i32**, i32**, i64)* @.omp_outlined..43 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], i32** [[TMP2]], i32** [[TMP3]], i32** [[TMP4]], i64 [[TMP24]]), !llvm.access.group !86
12631 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
12632 // CHECK9:       omp.inner.for.inc:
12633 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !86
12634 // CHECK9-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !86
12635 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
12636 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !86
12637 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP87:![0-9]+]]
12638 // CHECK9:       omp.inner.for.end:
12639 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
12640 // CHECK9:       omp.loop.exit:
12641 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12642 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
12643 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP28]])
12644 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
12645 // CHECK9-NEXT:    [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
12646 // CHECK9-NEXT:    br i1 [[TMP30]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
12647 // CHECK9:       .omp.final.then:
12648 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12649 // CHECK9-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP31]], 0
12650 // CHECK9-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
12651 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
12652 // CHECK9-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
12653 // CHECK9-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
12654 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
12655 // CHECK9:       .omp.final.done:
12656 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
12657 // CHECK9:       omp.precond.end:
12658 // CHECK9-NEXT:    ret void
12659 //
12660 //
12661 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..43
12662 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
12663 // CHECK9-NEXT:  entry:
12664 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
12665 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
12666 // CHECK9-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
12667 // CHECK9-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
12668 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
12669 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
12670 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
12671 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
12672 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
12673 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
12674 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
12675 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
12676 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
12677 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
12678 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
12679 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
12680 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12681 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12682 // CHECK9-NEXT:    [[I6:%.*]] = alloca i32, align 4
12683 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
12684 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
12685 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
12686 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
12687 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
12688 // CHECK9-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
12689 // CHECK9-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
12690 // CHECK9-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
12691 // CHECK9-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
12692 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
12693 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
12694 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
12695 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
12696 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
12697 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
12698 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
12699 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12700 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
12701 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
12702 // CHECK9-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
12703 // CHECK9-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
12704 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
12705 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12706 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
12707 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
12708 // CHECK9:       omp.precond.then:
12709 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
12710 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
12711 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
12712 // CHECK9-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
12713 // CHECK9-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
12714 // CHECK9-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
12715 // CHECK9-NEXT:    [[CONV5:%.*]] = trunc i64 [[TMP9]] to i32
12716 // CHECK9-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_LB]], align 4
12717 // CHECK9-NEXT:    store i32 [[CONV5]], i32* [[DOTOMP_UB]], align 4
12718 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
12719 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
12720 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[CONV]], align 8
12721 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12722 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
12723 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP12]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP10]])
12724 // CHECK9-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
12725 // CHECK9:       omp.dispatch.cond:
12726 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12727 // CHECK9-NEXT:    [[CONV7:%.*]] = sext i32 [[TMP13]] to i64
12728 // CHECK9-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
12729 // CHECK9-NEXT:    [[CMP8:%.*]] = icmp ugt i64 [[CONV7]], [[TMP14]]
12730 // CHECK9-NEXT:    br i1 [[CMP8]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
12731 // CHECK9:       cond.true:
12732 // CHECK9-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
12733 // CHECK9-NEXT:    br label [[COND_END:%.*]]
12734 // CHECK9:       cond.false:
12735 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12736 // CHECK9-NEXT:    [[CONV9:%.*]] = sext i32 [[TMP16]] to i64
12737 // CHECK9-NEXT:    br label [[COND_END]]
12738 // CHECK9:       cond.end:
12739 // CHECK9-NEXT:    [[COND:%.*]] = phi i64 [ [[TMP15]], [[COND_TRUE]] ], [ [[CONV9]], [[COND_FALSE]] ]
12740 // CHECK9-NEXT:    [[CONV10:%.*]] = trunc i64 [[COND]] to i32
12741 // CHECK9-NEXT:    store i32 [[CONV10]], i32* [[DOTOMP_UB]], align 4
12742 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
12743 // CHECK9-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
12744 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
12745 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12746 // CHECK9-NEXT:    [[CMP11:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
12747 // CHECK9-NEXT:    br i1 [[CMP11]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
12748 // CHECK9:       omp.dispatch.body:
12749 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
12750 // CHECK9:       omp.inner.for.cond:
12751 // CHECK9-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !89
12752 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !89
12753 // CHECK9-NEXT:    [[CMP12:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
12754 // CHECK9-NEXT:    br i1 [[CMP12]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12755 // CHECK9:       omp.inner.for.body:
12756 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !89
12757 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP22]], 1
12758 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
12759 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[I6]], align 4, !llvm.access.group !89
12760 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[TMP2]], align 8, !llvm.access.group !89
12761 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !89
12762 // CHECK9-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP24]] to i64
12763 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i64 [[IDXPROM]]
12764 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !89
12765 // CHECK9-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP3]], align 8, !llvm.access.group !89
12766 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !89
12767 // CHECK9-NEXT:    [[IDXPROM13:%.*]] = sext i32 [[TMP27]] to i64
12768 // CHECK9-NEXT:    [[ARRAYIDX14:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i64 [[IDXPROM13]]
12769 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[ARRAYIDX14]], align 4, !llvm.access.group !89
12770 // CHECK9-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP25]], [[TMP28]]
12771 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[TMP1]], align 8, !llvm.access.group !89
12772 // CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !89
12773 // CHECK9-NEXT:    [[IDXPROM16:%.*]] = sext i32 [[TMP30]] to i64
12774 // CHECK9-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds i32, i32* [[TMP29]], i64 [[IDXPROM16]]
12775 // CHECK9-NEXT:    store i32 [[ADD15]], i32* [[ARRAYIDX17]], align 4, !llvm.access.group !89
12776 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
12777 // CHECK9:       omp.body.continue:
12778 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
12779 // CHECK9:       omp.inner.for.inc:
12780 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !89
12781 // CHECK9-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP31]], 1
12782 // CHECK9-NEXT:    store i32 [[ADD18]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !89
12783 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP90:![0-9]+]]
12784 // CHECK9:       omp.inner.for.end:
12785 // CHECK9-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
12786 // CHECK9:       omp.dispatch.inc:
12787 // CHECK9-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
12788 // CHECK9-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
12789 // CHECK9-NEXT:    [[ADD19:%.*]] = add nsw i32 [[TMP32]], [[TMP33]]
12790 // CHECK9-NEXT:    store i32 [[ADD19]], i32* [[DOTOMP_LB]], align 4
12791 // CHECK9-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12792 // CHECK9-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
12793 // CHECK9-NEXT:    [[ADD20:%.*]] = add nsw i32 [[TMP34]], [[TMP35]]
12794 // CHECK9-NEXT:    store i32 [[ADD20]], i32* [[DOTOMP_UB]], align 4
12795 // CHECK9-NEXT:    br label [[OMP_DISPATCH_COND]]
12796 // CHECK9:       omp.dispatch.end:
12797 // CHECK9-NEXT:    [[TMP36:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12798 // CHECK9-NEXT:    [[TMP37:%.*]] = load i32, i32* [[TMP36]], align 4
12799 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP37]])
12800 // CHECK9-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
12801 // CHECK9-NEXT:    [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
12802 // CHECK9-NEXT:    br i1 [[TMP39]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
12803 // CHECK9:       .omp.final.then:
12804 // CHECK9-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12805 // CHECK9-NEXT:    [[SUB21:%.*]] = sub nsw i32 [[TMP40]], 0
12806 // CHECK9-NEXT:    [[DIV22:%.*]] = sdiv i32 [[SUB21]], 1
12807 // CHECK9-NEXT:    [[MUL23:%.*]] = mul nsw i32 [[DIV22]], 1
12808 // CHECK9-NEXT:    [[ADD24:%.*]] = add nsw i32 0, [[MUL23]]
12809 // CHECK9-NEXT:    store i32 [[ADD24]], i32* [[I6]], align 4
12810 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
12811 // CHECK9:       .omp.final.done:
12812 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
12813 // CHECK9:       omp.precond.end:
12814 // CHECK9-NEXT:    ret void
12815 //
12816 //
12817 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l82
12818 // CHECK9-SAME: (i64 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
12819 // CHECK9-NEXT:  entry:
12820 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
12821 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
12822 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 8
12823 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 8
12824 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
12825 // CHECK9-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
12826 // CHECK9-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 8
12827 // CHECK9-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 8
12828 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
12829 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..46 to void (i32*, i32*, ...)*), i32* [[CONV]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
12830 // CHECK9-NEXT:    ret void
12831 //
12832 //
12833 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..46
12834 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
12835 // CHECK9-NEXT:  entry:
12836 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
12837 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
12838 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
12839 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
12840 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
12841 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
12842 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
12843 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
12844 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
12845 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
12846 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
12847 // CHECK9-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
12848 // CHECK9-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
12849 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12850 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12851 // CHECK9-NEXT:    [[I3:%.*]] = alloca i32, align 4
12852 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
12853 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
12854 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
12855 // CHECK9-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
12856 // CHECK9-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
12857 // CHECK9-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
12858 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
12859 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
12860 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
12861 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
12862 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
12863 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
12864 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12865 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
12866 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
12867 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
12868 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
12869 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
12870 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12871 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
12872 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
12873 // CHECK9:       omp.precond.then:
12874 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
12875 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12876 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
12877 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
12878 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
12879 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12880 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
12881 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
12882 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
12883 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12884 // CHECK9-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
12885 // CHECK9-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
12886 // CHECK9:       cond.true:
12887 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12888 // CHECK9-NEXT:    br label [[COND_END:%.*]]
12889 // CHECK9:       cond.false:
12890 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
12891 // CHECK9-NEXT:    br label [[COND_END]]
12892 // CHECK9:       cond.end:
12893 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
12894 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
12895 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
12896 // CHECK9-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
12897 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
12898 // CHECK9:       omp.inner.for.cond:
12899 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !92
12900 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !92
12901 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
12902 // CHECK9-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12903 // CHECK9:       omp.inner.for.body:
12904 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !92
12905 // CHECK9-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
12906 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !92
12907 // CHECK9-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
12908 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i32**, i32**, i32**)* @.omp_outlined..47 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], i32** [[TMP1]], i32** [[TMP2]], i32** [[TMP3]]), !llvm.access.group !92
12909 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
12910 // CHECK9:       omp.inner.for.inc:
12911 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !92
12912 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !92
12913 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
12914 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !92
12915 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP93:![0-9]+]]
12916 // CHECK9:       omp.inner.for.end:
12917 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
12918 // CHECK9:       omp.loop.exit:
12919 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12920 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
12921 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
12922 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
12923 // CHECK9-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
12924 // CHECK9-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
12925 // CHECK9:       .omp.final.then:
12926 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12927 // CHECK9-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
12928 // CHECK9-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
12929 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
12930 // CHECK9-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
12931 // CHECK9-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
12932 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
12933 // CHECK9:       .omp.final.done:
12934 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
12935 // CHECK9:       omp.precond.end:
12936 // CHECK9-NEXT:    ret void
12937 //
12938 //
12939 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..47
12940 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
12941 // CHECK9-NEXT:  entry:
12942 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
12943 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
12944 // CHECK9-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
12945 // CHECK9-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
12946 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
12947 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
12948 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
12949 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
12950 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
12951 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
12952 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
12953 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
12954 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
12955 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
12956 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
12957 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12958 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12959 // CHECK9-NEXT:    [[I4:%.*]] = alloca i32, align 4
12960 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
12961 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
12962 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
12963 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
12964 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
12965 // CHECK9-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
12966 // CHECK9-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
12967 // CHECK9-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
12968 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
12969 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
12970 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
12971 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
12972 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
12973 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
12974 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12975 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
12976 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
12977 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
12978 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
12979 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
12980 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12981 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
12982 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
12983 // CHECK9:       omp.precond.then:
12984 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
12985 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12986 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
12987 // CHECK9-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
12988 // CHECK9-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
12989 // CHECK9-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
12990 // CHECK9-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
12991 // CHECK9-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
12992 // CHECK9-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
12993 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
12994 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
12995 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
12996 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12997 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
12998 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
12999 // CHECK9-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32 35, i32 [[TMP10]], i32 [[TMP11]], i32 1, i32 1)
13000 // CHECK9-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
13001 // CHECK9:       omp.dispatch.cond:
13002 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
13003 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4
13004 // CHECK9-NEXT:    [[TMP16:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP15]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
13005 // CHECK9-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP16]], 0
13006 // CHECK9-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
13007 // CHECK9:       omp.dispatch.body:
13008 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
13009 // CHECK9-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
13010 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
13011 // CHECK9:       omp.inner.for.cond:
13012 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !95
13013 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !95
13014 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
13015 // CHECK9-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13016 // CHECK9:       omp.inner.for.body:
13017 // CHECK9-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !95
13018 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP20]], 1
13019 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
13020 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !95
13021 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[TMP2]], align 8, !llvm.access.group !95
13022 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !95
13023 // CHECK9-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP22]] to i64
13024 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP21]], i64 [[IDXPROM]]
13025 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !95
13026 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32*, i32** [[TMP3]], align 8, !llvm.access.group !95
13027 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !95
13028 // CHECK9-NEXT:    [[IDXPROM6:%.*]] = sext i32 [[TMP25]] to i64
13029 // CHECK9-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i32, i32* [[TMP24]], i64 [[IDXPROM6]]
13030 // CHECK9-NEXT:    [[TMP26:%.*]] = load i32, i32* [[ARRAYIDX7]], align 4, !llvm.access.group !95
13031 // CHECK9-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP23]], [[TMP26]]
13032 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[TMP1]], align 8, !llvm.access.group !95
13033 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !95
13034 // CHECK9-NEXT:    [[IDXPROM9:%.*]] = sext i32 [[TMP28]] to i64
13035 // CHECK9-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds i32, i32* [[TMP27]], i64 [[IDXPROM9]]
13036 // CHECK9-NEXT:    store i32 [[ADD8]], i32* [[ARRAYIDX10]], align 4, !llvm.access.group !95
13037 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
13038 // CHECK9:       omp.body.continue:
13039 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
13040 // CHECK9:       omp.inner.for.inc:
13041 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !95
13042 // CHECK9-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP29]], 1
13043 // CHECK9-NEXT:    store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !95
13044 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP96:![0-9]+]]
13045 // CHECK9:       omp.inner.for.end:
13046 // CHECK9-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
13047 // CHECK9:       omp.dispatch.inc:
13048 // CHECK9-NEXT:    br label [[OMP_DISPATCH_COND]]
13049 // CHECK9:       omp.dispatch.end:
13050 // CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
13051 // CHECK9-NEXT:    [[TMP31:%.*]] = icmp ne i32 [[TMP30]], 0
13052 // CHECK9-NEXT:    br i1 [[TMP31]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
13053 // CHECK9:       .omp.final.then:
13054 // CHECK9-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
13055 // CHECK9-NEXT:    [[SUB12:%.*]] = sub nsw i32 [[TMP32]], 0
13056 // CHECK9-NEXT:    [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1
13057 // CHECK9-NEXT:    [[MUL14:%.*]] = mul nsw i32 [[DIV13]], 1
13058 // CHECK9-NEXT:    [[ADD15:%.*]] = add nsw i32 0, [[MUL14]]
13059 // CHECK9-NEXT:    store i32 [[ADD15]], i32* [[I4]], align 4
13060 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
13061 // CHECK9:       .omp.final.done:
13062 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
13063 // CHECK9:       omp.precond.end:
13064 // CHECK9-NEXT:    ret void
13065 //
13066 //
13067 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l90
13068 // CHECK9-SAME: (i64 [[CH:%.*]], i64 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
13069 // CHECK9-NEXT:  entry:
13070 // CHECK9-NEXT:    [[CH_ADDR:%.*]] = alloca i64, align 8
13071 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
13072 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
13073 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 8
13074 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 8
13075 // CHECK9-NEXT:    store i64 [[CH]], i64* [[CH_ADDR]], align 8
13076 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
13077 // CHECK9-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
13078 // CHECK9-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 8
13079 // CHECK9-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 8
13080 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
13081 // CHECK9-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
13082 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..50 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
13083 // CHECK9-NEXT:    ret void
13084 //
13085 //
13086 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..50
13087 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
13088 // CHECK9-NEXT:  entry:
13089 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
13090 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
13091 // CHECK9-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 8
13092 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
13093 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
13094 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
13095 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
13096 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
13097 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
13098 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
13099 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
13100 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
13101 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
13102 // CHECK9-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
13103 // CHECK9-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
13104 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13105 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13106 // CHECK9-NEXT:    [[I4:%.*]] = alloca i32, align 4
13107 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
13108 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
13109 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
13110 // CHECK9-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 8
13111 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
13112 // CHECK9-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
13113 // CHECK9-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
13114 // CHECK9-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
13115 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
13116 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
13117 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
13118 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
13119 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
13120 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
13121 // CHECK9-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
13122 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
13123 // CHECK9-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
13124 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13125 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
13126 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
13127 // CHECK9-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
13128 // CHECK9-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
13129 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
13130 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13131 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
13132 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
13133 // CHECK9:       omp.precond.then:
13134 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
13135 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
13136 // CHECK9-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
13137 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
13138 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
13139 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
13140 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
13141 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
13142 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
13143 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
13144 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
13145 // CHECK9-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
13146 // CHECK9:       cond.true:
13147 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
13148 // CHECK9-NEXT:    br label [[COND_END:%.*]]
13149 // CHECK9:       cond.false:
13150 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
13151 // CHECK9-NEXT:    br label [[COND_END]]
13152 // CHECK9:       cond.end:
13153 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
13154 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
13155 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
13156 // CHECK9-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
13157 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
13158 // CHECK9:       omp.inner.for.cond:
13159 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !98
13160 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !98
13161 // CHECK9-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
13162 // CHECK9-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13163 // CHECK9:       omp.inner.for.body:
13164 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !98
13165 // CHECK9-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
13166 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !98
13167 // CHECK9-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
13168 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !98
13169 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
13170 // CHECK9-NEXT:    store i32 [[TMP23]], i32* [[CONV]], align 4, !llvm.access.group !98
13171 // CHECK9-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !98
13172 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i32**, i32**, i32**, i64)* @.omp_outlined..51 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], i32** [[TMP2]], i32** [[TMP3]], i32** [[TMP4]], i64 [[TMP24]]), !llvm.access.group !98
13173 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
13174 // CHECK9:       omp.inner.for.inc:
13175 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !98
13176 // CHECK9-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !98
13177 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
13178 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !98
13179 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP99:![0-9]+]]
13180 // CHECK9:       omp.inner.for.end:
13181 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
13182 // CHECK9:       omp.loop.exit:
13183 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
13184 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
13185 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP28]])
13186 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
13187 // CHECK9-NEXT:    [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
13188 // CHECK9-NEXT:    br i1 [[TMP30]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
13189 // CHECK9:       .omp.final.then:
13190 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13191 // CHECK9-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP31]], 0
13192 // CHECK9-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
13193 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
13194 // CHECK9-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
13195 // CHECK9-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
13196 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
13197 // CHECK9:       .omp.final.done:
13198 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
13199 // CHECK9:       omp.precond.end:
13200 // CHECK9-NEXT:    ret void
13201 //
13202 //
13203 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..51
13204 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
13205 // CHECK9-NEXT:  entry:
13206 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
13207 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
13208 // CHECK9-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
13209 // CHECK9-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
13210 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
13211 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
13212 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
13213 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
13214 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
13215 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
13216 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
13217 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
13218 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
13219 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
13220 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
13221 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
13222 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13223 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13224 // CHECK9-NEXT:    [[I6:%.*]] = alloca i32, align 4
13225 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
13226 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
13227 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
13228 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
13229 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
13230 // CHECK9-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
13231 // CHECK9-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
13232 // CHECK9-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
13233 // CHECK9-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
13234 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
13235 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
13236 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
13237 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
13238 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
13239 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
13240 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
13241 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13242 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
13243 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
13244 // CHECK9-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
13245 // CHECK9-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
13246 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
13247 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13248 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
13249 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
13250 // CHECK9:       omp.precond.then:
13251 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
13252 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
13253 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
13254 // CHECK9-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
13255 // CHECK9-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
13256 // CHECK9-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
13257 // CHECK9-NEXT:    [[CONV5:%.*]] = trunc i64 [[TMP9]] to i32
13258 // CHECK9-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_LB]], align 4
13259 // CHECK9-NEXT:    store i32 [[CONV5]], i32* [[DOTOMP_UB]], align 4
13260 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
13261 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
13262 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[CONV]], align 8
13263 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
13264 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
13265 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
13266 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
13267 // CHECK9-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32 35, i32 [[TMP11]], i32 [[TMP12]], i32 1, i32 [[TMP10]])
13268 // CHECK9-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
13269 // CHECK9:       omp.dispatch.cond:
13270 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
13271 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4
13272 // CHECK9-NEXT:    [[TMP17:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP16]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
13273 // CHECK9-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP17]], 0
13274 // CHECK9-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
13275 // CHECK9:       omp.dispatch.body:
13276 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
13277 // CHECK9-NEXT:    store i32 [[TMP18]], i32* [[DOTOMP_IV]], align 4
13278 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
13279 // CHECK9:       omp.inner.for.cond:
13280 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !101
13281 // CHECK9-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !101
13282 // CHECK9-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
13283 // CHECK9-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13284 // CHECK9:       omp.inner.for.body:
13285 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !101
13286 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP21]], 1
13287 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
13288 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[I6]], align 4, !llvm.access.group !101
13289 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32*, i32** [[TMP2]], align 8, !llvm.access.group !101
13290 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !101
13291 // CHECK9-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP23]] to i64
13292 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP22]], i64 [[IDXPROM]]
13293 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !101
13294 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[TMP3]], align 8, !llvm.access.group !101
13295 // CHECK9-NEXT:    [[TMP26:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !101
13296 // CHECK9-NEXT:    [[IDXPROM8:%.*]] = sext i32 [[TMP26]] to i64
13297 // CHECK9-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, i32* [[TMP25]], i64 [[IDXPROM8]]
13298 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[ARRAYIDX9]], align 4, !llvm.access.group !101
13299 // CHECK9-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP24]], [[TMP27]]
13300 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32*, i32** [[TMP1]], align 8, !llvm.access.group !101
13301 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !101
13302 // CHECK9-NEXT:    [[IDXPROM11:%.*]] = sext i32 [[TMP29]] to i64
13303 // CHECK9-NEXT:    [[ARRAYIDX12:%.*]] = getelementptr inbounds i32, i32* [[TMP28]], i64 [[IDXPROM11]]
13304 // CHECK9-NEXT:    store i32 [[ADD10]], i32* [[ARRAYIDX12]], align 4, !llvm.access.group !101
13305 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
13306 // CHECK9:       omp.body.continue:
13307 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
13308 // CHECK9:       omp.inner.for.inc:
13309 // CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !101
13310 // CHECK9-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP30]], 1
13311 // CHECK9-NEXT:    store i32 [[ADD13]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !101
13312 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP102:![0-9]+]]
13313 // CHECK9:       omp.inner.for.end:
13314 // CHECK9-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
13315 // CHECK9:       omp.dispatch.inc:
13316 // CHECK9-NEXT:    br label [[OMP_DISPATCH_COND]]
13317 // CHECK9:       omp.dispatch.end:
13318 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
13319 // CHECK9-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
13320 // CHECK9-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
13321 // CHECK9:       .omp.final.then:
13322 // CHECK9-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13323 // CHECK9-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP33]], 0
13324 // CHECK9-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
13325 // CHECK9-NEXT:    [[MUL16:%.*]] = mul nsw i32 [[DIV15]], 1
13326 // CHECK9-NEXT:    [[ADD17:%.*]] = add nsw i32 0, [[MUL16]]
13327 // CHECK9-NEXT:    store i32 [[ADD17]], i32* [[I6]], align 4
13328 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
13329 // CHECK9:       .omp.final.done:
13330 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
13331 // CHECK9:       omp.precond.end:
13332 // CHECK9-NEXT:    ret void
13333 //
13334 //
13335 // CHECK9-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
13336 // CHECK9-SAME: () #[[ATTR4:[0-9]+]] {
13337 // CHECK9-NEXT:  entry:
13338 // CHECK9-NEXT:    call void @__tgt_register_requires(i64 1)
13339 // CHECK9-NEXT:    ret void
13340 //
13341 //
13342 // CHECK10-LABEL: define {{[^@]+}}@main
13343 // CHECK10-SAME: () #[[ATTR0:[0-9]+]] {
13344 // CHECK10-NEXT:  entry:
13345 // CHECK10-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
13346 // CHECK10-NEXT:    [[A:%.*]] = alloca double*, align 8
13347 // CHECK10-NEXT:    [[B:%.*]] = alloca double*, align 8
13348 // CHECK10-NEXT:    [[C:%.*]] = alloca double*, align 8
13349 // CHECK10-NEXT:    [[N:%.*]] = alloca i32, align 4
13350 // CHECK10-NEXT:    [[CH:%.*]] = alloca i32, align 4
13351 // CHECK10-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
13352 // CHECK10-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 8
13353 // CHECK10-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 8
13354 // CHECK10-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 8
13355 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
13356 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
13357 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
13358 // CHECK10-NEXT:    [[N_CASTED3:%.*]] = alloca i64, align 8
13359 // CHECK10-NEXT:    [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [4 x i8*], align 8
13360 // CHECK10-NEXT:    [[DOTOFFLOAD_PTRS6:%.*]] = alloca [4 x i8*], align 8
13361 // CHECK10-NEXT:    [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [4 x i8*], align 8
13362 // CHECK10-NEXT:    [[_TMP8:%.*]] = alloca i32, align 4
13363 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4
13364 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4
13365 // CHECK10-NEXT:    [[CH_CASTED:%.*]] = alloca i64, align 8
13366 // CHECK10-NEXT:    [[N_CASTED18:%.*]] = alloca i64, align 8
13367 // CHECK10-NEXT:    [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [5 x i8*], align 8
13368 // CHECK10-NEXT:    [[DOTOFFLOAD_PTRS21:%.*]] = alloca [5 x i8*], align 8
13369 // CHECK10-NEXT:    [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [5 x i8*], align 8
13370 // CHECK10-NEXT:    [[_TMP23:%.*]] = alloca i32, align 4
13371 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4
13372 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4
13373 // CHECK10-NEXT:    [[N_CASTED32:%.*]] = alloca i64, align 8
13374 // CHECK10-NEXT:    [[DOTOFFLOAD_BASEPTRS34:%.*]] = alloca [4 x i8*], align 8
13375 // CHECK10-NEXT:    [[DOTOFFLOAD_PTRS35:%.*]] = alloca [4 x i8*], align 8
13376 // CHECK10-NEXT:    [[DOTOFFLOAD_MAPPERS36:%.*]] = alloca [4 x i8*], align 8
13377 // CHECK10-NEXT:    [[_TMP37:%.*]] = alloca i32, align 4
13378 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_38:%.*]] = alloca i32, align 4
13379 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4
13380 // CHECK10-NEXT:    [[CH_CASTED46:%.*]] = alloca i64, align 8
13381 // CHECK10-NEXT:    [[N_CASTED48:%.*]] = alloca i64, align 8
13382 // CHECK10-NEXT:    [[DOTOFFLOAD_BASEPTRS50:%.*]] = alloca [5 x i8*], align 8
13383 // CHECK10-NEXT:    [[DOTOFFLOAD_PTRS51:%.*]] = alloca [5 x i8*], align 8
13384 // CHECK10-NEXT:    [[DOTOFFLOAD_MAPPERS52:%.*]] = alloca [5 x i8*], align 8
13385 // CHECK10-NEXT:    [[_TMP53:%.*]] = alloca i32, align 4
13386 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_54:%.*]] = alloca i32, align 4
13387 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_55:%.*]] = alloca i32, align 4
13388 // CHECK10-NEXT:    [[N_CASTED62:%.*]] = alloca i64, align 8
13389 // CHECK10-NEXT:    [[DOTOFFLOAD_BASEPTRS64:%.*]] = alloca [4 x i8*], align 8
13390 // CHECK10-NEXT:    [[DOTOFFLOAD_PTRS65:%.*]] = alloca [4 x i8*], align 8
13391 // CHECK10-NEXT:    [[DOTOFFLOAD_MAPPERS66:%.*]] = alloca [4 x i8*], align 8
13392 // CHECK10-NEXT:    [[_TMP67:%.*]] = alloca i32, align 4
13393 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_68:%.*]] = alloca i32, align 4
13394 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_69:%.*]] = alloca i32, align 4
13395 // CHECK10-NEXT:    [[CH_CASTED76:%.*]] = alloca i64, align 8
13396 // CHECK10-NEXT:    [[N_CASTED78:%.*]] = alloca i64, align 8
13397 // CHECK10-NEXT:    [[DOTOFFLOAD_BASEPTRS80:%.*]] = alloca [5 x i8*], align 8
13398 // CHECK10-NEXT:    [[DOTOFFLOAD_PTRS81:%.*]] = alloca [5 x i8*], align 8
13399 // CHECK10-NEXT:    [[DOTOFFLOAD_MAPPERS82:%.*]] = alloca [5 x i8*], align 8
13400 // CHECK10-NEXT:    [[_TMP83:%.*]] = alloca i32, align 4
13401 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_84:%.*]] = alloca i32, align 4
13402 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_85:%.*]] = alloca i32, align 4
13403 // CHECK10-NEXT:    store i32 0, i32* [[RETVAL]], align 4
13404 // CHECK10-NEXT:    store i32 10000, i32* [[N]], align 4
13405 // CHECK10-NEXT:    store i32 100, i32* [[CH]], align 4
13406 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
13407 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_CASTED]] to i32*
13408 // CHECK10-NEXT:    store i32 [[TMP0]], i32* [[CONV]], align 4
13409 // CHECK10-NEXT:    [[TMP1:%.*]] = load i64, i64* [[N_CASTED]], align 8
13410 // CHECK10-NEXT:    [[TMP2:%.*]] = load double*, double** [[A]], align 8
13411 // CHECK10-NEXT:    [[TMP3:%.*]] = load double*, double** [[B]], align 8
13412 // CHECK10-NEXT:    [[TMP4:%.*]] = load double*, double** [[C]], align 8
13413 // CHECK10-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
13414 // CHECK10-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i64*
13415 // CHECK10-NEXT:    store i64 [[TMP1]], i64* [[TMP6]], align 8
13416 // CHECK10-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
13417 // CHECK10-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64*
13418 // CHECK10-NEXT:    store i64 [[TMP1]], i64* [[TMP8]], align 8
13419 // CHECK10-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
13420 // CHECK10-NEXT:    store i8* null, i8** [[TMP9]], align 8
13421 // CHECK10-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
13422 // CHECK10-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to double**
13423 // CHECK10-NEXT:    store double* [[TMP2]], double** [[TMP11]], align 8
13424 // CHECK10-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
13425 // CHECK10-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double**
13426 // CHECK10-NEXT:    store double* [[TMP2]], double** [[TMP13]], align 8
13427 // CHECK10-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
13428 // CHECK10-NEXT:    store i8* null, i8** [[TMP14]], align 8
13429 // CHECK10-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
13430 // CHECK10-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to double**
13431 // CHECK10-NEXT:    store double* [[TMP3]], double** [[TMP16]], align 8
13432 // CHECK10-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
13433 // CHECK10-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to double**
13434 // CHECK10-NEXT:    store double* [[TMP3]], double** [[TMP18]], align 8
13435 // CHECK10-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
13436 // CHECK10-NEXT:    store i8* null, i8** [[TMP19]], align 8
13437 // CHECK10-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
13438 // CHECK10-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to double**
13439 // CHECK10-NEXT:    store double* [[TMP4]], double** [[TMP21]], align 8
13440 // CHECK10-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
13441 // CHECK10-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to double**
13442 // CHECK10-NEXT:    store double* [[TMP4]], double** [[TMP23]], align 8
13443 // CHECK10-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
13444 // CHECK10-NEXT:    store i8* null, i8** [[TMP24]], align 8
13445 // CHECK10-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
13446 // CHECK10-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
13447 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32, i32* [[N]], align 4
13448 // CHECK10-NEXT:    store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4
13449 // CHECK10-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
13450 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0
13451 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
13452 // CHECK10-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
13453 // CHECK10-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
13454 // CHECK10-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13455 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP29]], 1
13456 // CHECK10-NEXT:    [[TMP30:%.*]] = zext i32 [[ADD]] to i64
13457 // CHECK10-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]])
13458 // CHECK10-NEXT:    [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l368.region_id, i32 4, i8** [[TMP25]], i8** [[TMP26]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
13459 // CHECK10-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
13460 // CHECK10-NEXT:    br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
13461 // CHECK10:       omp_offload.failed:
13462 // CHECK10-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l368(i64 [[TMP1]], double* [[TMP2]], double* [[TMP3]], double* [[TMP4]]) #[[ATTR2:[0-9]+]]
13463 // CHECK10-NEXT:    br label [[OMP_OFFLOAD_CONT]]
13464 // CHECK10:       omp_offload.cont:
13465 // CHECK10-NEXT:    [[TMP33:%.*]] = load i32, i32* [[N]], align 4
13466 // CHECK10-NEXT:    [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32*
13467 // CHECK10-NEXT:    store i32 [[TMP33]], i32* [[CONV4]], align 4
13468 // CHECK10-NEXT:    [[TMP34:%.*]] = load i64, i64* [[N_CASTED3]], align 8
13469 // CHECK10-NEXT:    [[TMP35:%.*]] = load double*, double** [[A]], align 8
13470 // CHECK10-NEXT:    [[TMP36:%.*]] = load double*, double** [[B]], align 8
13471 // CHECK10-NEXT:    [[TMP37:%.*]] = load double*, double** [[C]], align 8
13472 // CHECK10-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
13473 // CHECK10-NEXT:    [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64*
13474 // CHECK10-NEXT:    store i64 [[TMP34]], i64* [[TMP39]], align 8
13475 // CHECK10-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
13476 // CHECK10-NEXT:    [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i64*
13477 // CHECK10-NEXT:    store i64 [[TMP34]], i64* [[TMP41]], align 8
13478 // CHECK10-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0
13479 // CHECK10-NEXT:    store i8* null, i8** [[TMP42]], align 8
13480 // CHECK10-NEXT:    [[TMP43:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1
13481 // CHECK10-NEXT:    [[TMP44:%.*]] = bitcast i8** [[TMP43]] to double**
13482 // CHECK10-NEXT:    store double* [[TMP35]], double** [[TMP44]], align 8
13483 // CHECK10-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1
13484 // CHECK10-NEXT:    [[TMP46:%.*]] = bitcast i8** [[TMP45]] to double**
13485 // CHECK10-NEXT:    store double* [[TMP35]], double** [[TMP46]], align 8
13486 // CHECK10-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1
13487 // CHECK10-NEXT:    store i8* null, i8** [[TMP47]], align 8
13488 // CHECK10-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2
13489 // CHECK10-NEXT:    [[TMP49:%.*]] = bitcast i8** [[TMP48]] to double**
13490 // CHECK10-NEXT:    store double* [[TMP36]], double** [[TMP49]], align 8
13491 // CHECK10-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2
13492 // CHECK10-NEXT:    [[TMP51:%.*]] = bitcast i8** [[TMP50]] to double**
13493 // CHECK10-NEXT:    store double* [[TMP36]], double** [[TMP51]], align 8
13494 // CHECK10-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2
13495 // CHECK10-NEXT:    store i8* null, i8** [[TMP52]], align 8
13496 // CHECK10-NEXT:    [[TMP53:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 3
13497 // CHECK10-NEXT:    [[TMP54:%.*]] = bitcast i8** [[TMP53]] to double**
13498 // CHECK10-NEXT:    store double* [[TMP37]], double** [[TMP54]], align 8
13499 // CHECK10-NEXT:    [[TMP55:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 3
13500 // CHECK10-NEXT:    [[TMP56:%.*]] = bitcast i8** [[TMP55]] to double**
13501 // CHECK10-NEXT:    store double* [[TMP37]], double** [[TMP56]], align 8
13502 // CHECK10-NEXT:    [[TMP57:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 3
13503 // CHECK10-NEXT:    store i8* null, i8** [[TMP57]], align 8
13504 // CHECK10-NEXT:    [[TMP58:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
13505 // CHECK10-NEXT:    [[TMP59:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
13506 // CHECK10-NEXT:    [[TMP60:%.*]] = load i32, i32* [[N]], align 4
13507 // CHECK10-NEXT:    store i32 [[TMP60]], i32* [[DOTCAPTURE_EXPR_9]], align 4
13508 // CHECK10-NEXT:    [[TMP61:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4
13509 // CHECK10-NEXT:    [[SUB11:%.*]] = sub nsw i32 [[TMP61]], 0
13510 // CHECK10-NEXT:    [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1
13511 // CHECK10-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1
13512 // CHECK10-NEXT:    store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4
13513 // CHECK10-NEXT:    [[TMP62:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4
13514 // CHECK10-NEXT:    [[ADD14:%.*]] = add nsw i32 [[TMP62]], 1
13515 // CHECK10-NEXT:    [[TMP63:%.*]] = zext i32 [[ADD14]] to i64
13516 // CHECK10-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP63]])
13517 // CHECK10-NEXT:    [[TMP64:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l407.region_id, i32 4, i8** [[TMP58]], i8** [[TMP59]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
13518 // CHECK10-NEXT:    [[TMP65:%.*]] = icmp ne i32 [[TMP64]], 0
13519 // CHECK10-NEXT:    br i1 [[TMP65]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]]
13520 // CHECK10:       omp_offload.failed15:
13521 // CHECK10-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l407(i64 [[TMP34]], double* [[TMP35]], double* [[TMP36]], double* [[TMP37]]) #[[ATTR2]]
13522 // CHECK10-NEXT:    br label [[OMP_OFFLOAD_CONT16]]
13523 // CHECK10:       omp_offload.cont16:
13524 // CHECK10-NEXT:    [[TMP66:%.*]] = load i32, i32* [[CH]], align 4
13525 // CHECK10-NEXT:    [[CONV17:%.*]] = bitcast i64* [[CH_CASTED]] to i32*
13526 // CHECK10-NEXT:    store i32 [[TMP66]], i32* [[CONV17]], align 4
13527 // CHECK10-NEXT:    [[TMP67:%.*]] = load i64, i64* [[CH_CASTED]], align 8
13528 // CHECK10-NEXT:    [[TMP68:%.*]] = load i32, i32* [[N]], align 4
13529 // CHECK10-NEXT:    [[CONV19:%.*]] = bitcast i64* [[N_CASTED18]] to i32*
13530 // CHECK10-NEXT:    store i32 [[TMP68]], i32* [[CONV19]], align 4
13531 // CHECK10-NEXT:    [[TMP69:%.*]] = load i64, i64* [[N_CASTED18]], align 8
13532 // CHECK10-NEXT:    [[TMP70:%.*]] = load double*, double** [[A]], align 8
13533 // CHECK10-NEXT:    [[TMP71:%.*]] = load double*, double** [[B]], align 8
13534 // CHECK10-NEXT:    [[TMP72:%.*]] = load double*, double** [[C]], align 8
13535 // CHECK10-NEXT:    [[TMP73:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0
13536 // CHECK10-NEXT:    [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i64*
13537 // CHECK10-NEXT:    store i64 [[TMP67]], i64* [[TMP74]], align 8
13538 // CHECK10-NEXT:    [[TMP75:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0
13539 // CHECK10-NEXT:    [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i64*
13540 // CHECK10-NEXT:    store i64 [[TMP67]], i64* [[TMP76]], align 8
13541 // CHECK10-NEXT:    [[TMP77:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0
13542 // CHECK10-NEXT:    store i8* null, i8** [[TMP77]], align 8
13543 // CHECK10-NEXT:    [[TMP78:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1
13544 // CHECK10-NEXT:    [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i64*
13545 // CHECK10-NEXT:    store i64 [[TMP69]], i64* [[TMP79]], align 8
13546 // CHECK10-NEXT:    [[TMP80:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1
13547 // CHECK10-NEXT:    [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i64*
13548 // CHECK10-NEXT:    store i64 [[TMP69]], i64* [[TMP81]], align 8
13549 // CHECK10-NEXT:    [[TMP82:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1
13550 // CHECK10-NEXT:    store i8* null, i8** [[TMP82]], align 8
13551 // CHECK10-NEXT:    [[TMP83:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2
13552 // CHECK10-NEXT:    [[TMP84:%.*]] = bitcast i8** [[TMP83]] to double**
13553 // CHECK10-NEXT:    store double* [[TMP70]], double** [[TMP84]], align 8
13554 // CHECK10-NEXT:    [[TMP85:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2
13555 // CHECK10-NEXT:    [[TMP86:%.*]] = bitcast i8** [[TMP85]] to double**
13556 // CHECK10-NEXT:    store double* [[TMP70]], double** [[TMP86]], align 8
13557 // CHECK10-NEXT:    [[TMP87:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2
13558 // CHECK10-NEXT:    store i8* null, i8** [[TMP87]], align 8
13559 // CHECK10-NEXT:    [[TMP88:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3
13560 // CHECK10-NEXT:    [[TMP89:%.*]] = bitcast i8** [[TMP88]] to double**
13561 // CHECK10-NEXT:    store double* [[TMP71]], double** [[TMP89]], align 8
13562 // CHECK10-NEXT:    [[TMP90:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3
13563 // CHECK10-NEXT:    [[TMP91:%.*]] = bitcast i8** [[TMP90]] to double**
13564 // CHECK10-NEXT:    store double* [[TMP71]], double** [[TMP91]], align 8
13565 // CHECK10-NEXT:    [[TMP92:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 3
13566 // CHECK10-NEXT:    store i8* null, i8** [[TMP92]], align 8
13567 // CHECK10-NEXT:    [[TMP93:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 4
13568 // CHECK10-NEXT:    [[TMP94:%.*]] = bitcast i8** [[TMP93]] to double**
13569 // CHECK10-NEXT:    store double* [[TMP72]], double** [[TMP94]], align 8
13570 // CHECK10-NEXT:    [[TMP95:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 4
13571 // CHECK10-NEXT:    [[TMP96:%.*]] = bitcast i8** [[TMP95]] to double**
13572 // CHECK10-NEXT:    store double* [[TMP72]], double** [[TMP96]], align 8
13573 // CHECK10-NEXT:    [[TMP97:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 4
13574 // CHECK10-NEXT:    store i8* null, i8** [[TMP97]], align 8
13575 // CHECK10-NEXT:    [[TMP98:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0
13576 // CHECK10-NEXT:    [[TMP99:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0
13577 // CHECK10-NEXT:    [[TMP100:%.*]] = load i32, i32* [[N]], align 4
13578 // CHECK10-NEXT:    store i32 [[TMP100]], i32* [[DOTCAPTURE_EXPR_24]], align 4
13579 // CHECK10-NEXT:    [[TMP101:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4
13580 // CHECK10-NEXT:    [[SUB26:%.*]] = sub nsw i32 [[TMP101]], 0
13581 // CHECK10-NEXT:    [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1
13582 // CHECK10-NEXT:    [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1
13583 // CHECK10-NEXT:    store i32 [[SUB28]], i32* [[DOTCAPTURE_EXPR_25]], align 4
13584 // CHECK10-NEXT:    [[TMP102:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4
13585 // CHECK10-NEXT:    [[ADD29:%.*]] = add nsw i32 [[TMP102]], 1
13586 // CHECK10-NEXT:    [[TMP103:%.*]] = zext i32 [[ADD29]] to i64
13587 // CHECK10-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP103]])
13588 // CHECK10-NEXT:    [[TMP104:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l446.region_id, i32 5, i8** [[TMP98]], i8** [[TMP99]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
13589 // CHECK10-NEXT:    [[TMP105:%.*]] = icmp ne i32 [[TMP104]], 0
13590 // CHECK10-NEXT:    br i1 [[TMP105]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]]
13591 // CHECK10:       omp_offload.failed30:
13592 // CHECK10-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l446(i64 [[TMP67]], i64 [[TMP69]], double* [[TMP70]], double* [[TMP71]], double* [[TMP72]]) #[[ATTR2]]
13593 // CHECK10-NEXT:    br label [[OMP_OFFLOAD_CONT31]]
13594 // CHECK10:       omp_offload.cont31:
13595 // CHECK10-NEXT:    [[TMP106:%.*]] = load i32, i32* [[N]], align 4
13596 // CHECK10-NEXT:    [[CONV33:%.*]] = bitcast i64* [[N_CASTED32]] to i32*
13597 // CHECK10-NEXT:    store i32 [[TMP106]], i32* [[CONV33]], align 4
13598 // CHECK10-NEXT:    [[TMP107:%.*]] = load i64, i64* [[N_CASTED32]], align 8
13599 // CHECK10-NEXT:    [[TMP108:%.*]] = load double*, double** [[A]], align 8
13600 // CHECK10-NEXT:    [[TMP109:%.*]] = load double*, double** [[B]], align 8
13601 // CHECK10-NEXT:    [[TMP110:%.*]] = load double*, double** [[C]], align 8
13602 // CHECK10-NEXT:    [[TMP111:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0
13603 // CHECK10-NEXT:    [[TMP112:%.*]] = bitcast i8** [[TMP111]] to i64*
13604 // CHECK10-NEXT:    store i64 [[TMP107]], i64* [[TMP112]], align 8
13605 // CHECK10-NEXT:    [[TMP113:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 0
13606 // CHECK10-NEXT:    [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i64*
13607 // CHECK10-NEXT:    store i64 [[TMP107]], i64* [[TMP114]], align 8
13608 // CHECK10-NEXT:    [[TMP115:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 0
13609 // CHECK10-NEXT:    store i8* null, i8** [[TMP115]], align 8
13610 // CHECK10-NEXT:    [[TMP116:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 1
13611 // CHECK10-NEXT:    [[TMP117:%.*]] = bitcast i8** [[TMP116]] to double**
13612 // CHECK10-NEXT:    store double* [[TMP108]], double** [[TMP117]], align 8
13613 // CHECK10-NEXT:    [[TMP118:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 1
13614 // CHECK10-NEXT:    [[TMP119:%.*]] = bitcast i8** [[TMP118]] to double**
13615 // CHECK10-NEXT:    store double* [[TMP108]], double** [[TMP119]], align 8
13616 // CHECK10-NEXT:    [[TMP120:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 1
13617 // CHECK10-NEXT:    store i8* null, i8** [[TMP120]], align 8
13618 // CHECK10-NEXT:    [[TMP121:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 2
13619 // CHECK10-NEXT:    [[TMP122:%.*]] = bitcast i8** [[TMP121]] to double**
13620 // CHECK10-NEXT:    store double* [[TMP109]], double** [[TMP122]], align 8
13621 // CHECK10-NEXT:    [[TMP123:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 2
13622 // CHECK10-NEXT:    [[TMP124:%.*]] = bitcast i8** [[TMP123]] to double**
13623 // CHECK10-NEXT:    store double* [[TMP109]], double** [[TMP124]], align 8
13624 // CHECK10-NEXT:    [[TMP125:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 2
13625 // CHECK10-NEXT:    store i8* null, i8** [[TMP125]], align 8
13626 // CHECK10-NEXT:    [[TMP126:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 3
13627 // CHECK10-NEXT:    [[TMP127:%.*]] = bitcast i8** [[TMP126]] to double**
13628 // CHECK10-NEXT:    store double* [[TMP110]], double** [[TMP127]], align 8
13629 // CHECK10-NEXT:    [[TMP128:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 3
13630 // CHECK10-NEXT:    [[TMP129:%.*]] = bitcast i8** [[TMP128]] to double**
13631 // CHECK10-NEXT:    store double* [[TMP110]], double** [[TMP129]], align 8
13632 // CHECK10-NEXT:    [[TMP130:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 3
13633 // CHECK10-NEXT:    store i8* null, i8** [[TMP130]], align 8
13634 // CHECK10-NEXT:    [[TMP131:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0
13635 // CHECK10-NEXT:    [[TMP132:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 0
13636 // CHECK10-NEXT:    [[TMP133:%.*]] = load i32, i32* [[N]], align 4
13637 // CHECK10-NEXT:    store i32 [[TMP133]], i32* [[DOTCAPTURE_EXPR_38]], align 4
13638 // CHECK10-NEXT:    [[TMP134:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_38]], align 4
13639 // CHECK10-NEXT:    [[SUB40:%.*]] = sub nsw i32 [[TMP134]], 0
13640 // CHECK10-NEXT:    [[DIV41:%.*]] = sdiv i32 [[SUB40]], 1
13641 // CHECK10-NEXT:    [[SUB42:%.*]] = sub nsw i32 [[DIV41]], 1
13642 // CHECK10-NEXT:    store i32 [[SUB42]], i32* [[DOTCAPTURE_EXPR_39]], align 4
13643 // CHECK10-NEXT:    [[TMP135:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_39]], align 4
13644 // CHECK10-NEXT:    [[ADD43:%.*]] = add nsw i32 [[TMP135]], 1
13645 // CHECK10-NEXT:    [[TMP136:%.*]] = zext i32 [[ADD43]] to i64
13646 // CHECK10-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP136]])
13647 // CHECK10-NEXT:    [[TMP137:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l477.region_id, i32 4, i8** [[TMP131]], i8** [[TMP132]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
13648 // CHECK10-NEXT:    [[TMP138:%.*]] = icmp ne i32 [[TMP137]], 0
13649 // CHECK10-NEXT:    br i1 [[TMP138]], label [[OMP_OFFLOAD_FAILED44:%.*]], label [[OMP_OFFLOAD_CONT45:%.*]]
13650 // CHECK10:       omp_offload.failed44:
13651 // CHECK10-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l477(i64 [[TMP107]], double* [[TMP108]], double* [[TMP109]], double* [[TMP110]]) #[[ATTR2]]
13652 // CHECK10-NEXT:    br label [[OMP_OFFLOAD_CONT45]]
13653 // CHECK10:       omp_offload.cont45:
13654 // CHECK10-NEXT:    [[TMP139:%.*]] = load i32, i32* [[CH]], align 4
13655 // CHECK10-NEXT:    [[CONV47:%.*]] = bitcast i64* [[CH_CASTED46]] to i32*
13656 // CHECK10-NEXT:    store i32 [[TMP139]], i32* [[CONV47]], align 4
13657 // CHECK10-NEXT:    [[TMP140:%.*]] = load i64, i64* [[CH_CASTED46]], align 8
13658 // CHECK10-NEXT:    [[TMP141:%.*]] = load i32, i32* [[N]], align 4
13659 // CHECK10-NEXT:    [[CONV49:%.*]] = bitcast i64* [[N_CASTED48]] to i32*
13660 // CHECK10-NEXT:    store i32 [[TMP141]], i32* [[CONV49]], align 4
13661 // CHECK10-NEXT:    [[TMP142:%.*]] = load i64, i64* [[N_CASTED48]], align 8
13662 // CHECK10-NEXT:    [[TMP143:%.*]] = load double*, double** [[A]], align 8
13663 // CHECK10-NEXT:    [[TMP144:%.*]] = load double*, double** [[B]], align 8
13664 // CHECK10-NEXT:    [[TMP145:%.*]] = load double*, double** [[C]], align 8
13665 // CHECK10-NEXT:    [[TMP146:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0
13666 // CHECK10-NEXT:    [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i64*
13667 // CHECK10-NEXT:    store i64 [[TMP140]], i64* [[TMP147]], align 8
13668 // CHECK10-NEXT:    [[TMP148:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 0
13669 // CHECK10-NEXT:    [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i64*
13670 // CHECK10-NEXT:    store i64 [[TMP140]], i64* [[TMP149]], align 8
13671 // CHECK10-NEXT:    [[TMP150:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 0
13672 // CHECK10-NEXT:    store i8* null, i8** [[TMP150]], align 8
13673 // CHECK10-NEXT:    [[TMP151:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 1
13674 // CHECK10-NEXT:    [[TMP152:%.*]] = bitcast i8** [[TMP151]] to i64*
13675 // CHECK10-NEXT:    store i64 [[TMP142]], i64* [[TMP152]], align 8
13676 // CHECK10-NEXT:    [[TMP153:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 1
13677 // CHECK10-NEXT:    [[TMP154:%.*]] = bitcast i8** [[TMP153]] to i64*
13678 // CHECK10-NEXT:    store i64 [[TMP142]], i64* [[TMP154]], align 8
13679 // CHECK10-NEXT:    [[TMP155:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 1
13680 // CHECK10-NEXT:    store i8* null, i8** [[TMP155]], align 8
13681 // CHECK10-NEXT:    [[TMP156:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 2
13682 // CHECK10-NEXT:    [[TMP157:%.*]] = bitcast i8** [[TMP156]] to double**
13683 // CHECK10-NEXT:    store double* [[TMP143]], double** [[TMP157]], align 8
13684 // CHECK10-NEXT:    [[TMP158:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 2
13685 // CHECK10-NEXT:    [[TMP159:%.*]] = bitcast i8** [[TMP158]] to double**
13686 // CHECK10-NEXT:    store double* [[TMP143]], double** [[TMP159]], align 8
13687 // CHECK10-NEXT:    [[TMP160:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 2
13688 // CHECK10-NEXT:    store i8* null, i8** [[TMP160]], align 8
13689 // CHECK10-NEXT:    [[TMP161:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 3
13690 // CHECK10-NEXT:    [[TMP162:%.*]] = bitcast i8** [[TMP161]] to double**
13691 // CHECK10-NEXT:    store double* [[TMP144]], double** [[TMP162]], align 8
13692 // CHECK10-NEXT:    [[TMP163:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 3
13693 // CHECK10-NEXT:    [[TMP164:%.*]] = bitcast i8** [[TMP163]] to double**
13694 // CHECK10-NEXT:    store double* [[TMP144]], double** [[TMP164]], align 8
13695 // CHECK10-NEXT:    [[TMP165:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 3
13696 // CHECK10-NEXT:    store i8* null, i8** [[TMP165]], align 8
13697 // CHECK10-NEXT:    [[TMP166:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 4
13698 // CHECK10-NEXT:    [[TMP167:%.*]] = bitcast i8** [[TMP166]] to double**
13699 // CHECK10-NEXT:    store double* [[TMP145]], double** [[TMP167]], align 8
13700 // CHECK10-NEXT:    [[TMP168:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 4
13701 // CHECK10-NEXT:    [[TMP169:%.*]] = bitcast i8** [[TMP168]] to double**
13702 // CHECK10-NEXT:    store double* [[TMP145]], double** [[TMP169]], align 8
13703 // CHECK10-NEXT:    [[TMP170:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 4
13704 // CHECK10-NEXT:    store i8* null, i8** [[TMP170]], align 8
13705 // CHECK10-NEXT:    [[TMP171:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0
13706 // CHECK10-NEXT:    [[TMP172:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 0
13707 // CHECK10-NEXT:    [[TMP173:%.*]] = load i32, i32* [[N]], align 4
13708 // CHECK10-NEXT:    store i32 [[TMP173]], i32* [[DOTCAPTURE_EXPR_54]], align 4
13709 // CHECK10-NEXT:    [[TMP174:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_54]], align 4
13710 // CHECK10-NEXT:    [[SUB56:%.*]] = sub nsw i32 [[TMP174]], 0
13711 // CHECK10-NEXT:    [[DIV57:%.*]] = sdiv i32 [[SUB56]], 1
13712 // CHECK10-NEXT:    [[SUB58:%.*]] = sub nsw i32 [[DIV57]], 1
13713 // CHECK10-NEXT:    store i32 [[SUB58]], i32* [[DOTCAPTURE_EXPR_55]], align 4
13714 // CHECK10-NEXT:    [[TMP175:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_55]], align 4
13715 // CHECK10-NEXT:    [[ADD59:%.*]] = add nsw i32 [[TMP175]], 1
13716 // CHECK10-NEXT:    [[TMP176:%.*]] = zext i32 [[ADD59]] to i64
13717 // CHECK10-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP176]])
13718 // CHECK10-NEXT:    [[TMP177:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l505.region_id, i32 5, i8** [[TMP171]], i8** [[TMP172]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
13719 // CHECK10-NEXT:    [[TMP178:%.*]] = icmp ne i32 [[TMP177]], 0
13720 // CHECK10-NEXT:    br i1 [[TMP178]], label [[OMP_OFFLOAD_FAILED60:%.*]], label [[OMP_OFFLOAD_CONT61:%.*]]
13721 // CHECK10:       omp_offload.failed60:
13722 // CHECK10-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l505(i64 [[TMP140]], i64 [[TMP142]], double* [[TMP143]], double* [[TMP144]], double* [[TMP145]]) #[[ATTR2]]
13723 // CHECK10-NEXT:    br label [[OMP_OFFLOAD_CONT61]]
13724 // CHECK10:       omp_offload.cont61:
13725 // CHECK10-NEXT:    [[TMP179:%.*]] = load i32, i32* [[N]], align 4
13726 // CHECK10-NEXT:    [[CONV63:%.*]] = bitcast i64* [[N_CASTED62]] to i32*
13727 // CHECK10-NEXT:    store i32 [[TMP179]], i32* [[CONV63]], align 4
13728 // CHECK10-NEXT:    [[TMP180:%.*]] = load i64, i64* [[N_CASTED62]], align 8
13729 // CHECK10-NEXT:    [[TMP181:%.*]] = load double*, double** [[A]], align 8
13730 // CHECK10-NEXT:    [[TMP182:%.*]] = load double*, double** [[B]], align 8
13731 // CHECK10-NEXT:    [[TMP183:%.*]] = load double*, double** [[C]], align 8
13732 // CHECK10-NEXT:    [[TMP184:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS64]], i32 0, i32 0
13733 // CHECK10-NEXT:    [[TMP185:%.*]] = bitcast i8** [[TMP184]] to i64*
13734 // CHECK10-NEXT:    store i64 [[TMP180]], i64* [[TMP185]], align 8
13735 // CHECK10-NEXT:    [[TMP186:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS65]], i32 0, i32 0
13736 // CHECK10-NEXT:    [[TMP187:%.*]] = bitcast i8** [[TMP186]] to i64*
13737 // CHECK10-NEXT:    store i64 [[TMP180]], i64* [[TMP187]], align 8
13738 // CHECK10-NEXT:    [[TMP188:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS66]], i64 0, i64 0
13739 // CHECK10-NEXT:    store i8* null, i8** [[TMP188]], align 8
13740 // CHECK10-NEXT:    [[TMP189:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS64]], i32 0, i32 1
13741 // CHECK10-NEXT:    [[TMP190:%.*]] = bitcast i8** [[TMP189]] to double**
13742 // CHECK10-NEXT:    store double* [[TMP181]], double** [[TMP190]], align 8
13743 // CHECK10-NEXT:    [[TMP191:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS65]], i32 0, i32 1
13744 // CHECK10-NEXT:    [[TMP192:%.*]] = bitcast i8** [[TMP191]] to double**
13745 // CHECK10-NEXT:    store double* [[TMP181]], double** [[TMP192]], align 8
13746 // CHECK10-NEXT:    [[TMP193:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS66]], i64 0, i64 1
13747 // CHECK10-NEXT:    store i8* null, i8** [[TMP193]], align 8
13748 // CHECK10-NEXT:    [[TMP194:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS64]], i32 0, i32 2
13749 // CHECK10-NEXT:    [[TMP195:%.*]] = bitcast i8** [[TMP194]] to double**
13750 // CHECK10-NEXT:    store double* [[TMP182]], double** [[TMP195]], align 8
13751 // CHECK10-NEXT:    [[TMP196:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS65]], i32 0, i32 2
13752 // CHECK10-NEXT:    [[TMP197:%.*]] = bitcast i8** [[TMP196]] to double**
13753 // CHECK10-NEXT:    store double* [[TMP182]], double** [[TMP197]], align 8
13754 // CHECK10-NEXT:    [[TMP198:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS66]], i64 0, i64 2
13755 // CHECK10-NEXT:    store i8* null, i8** [[TMP198]], align 8
13756 // CHECK10-NEXT:    [[TMP199:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS64]], i32 0, i32 3
13757 // CHECK10-NEXT:    [[TMP200:%.*]] = bitcast i8** [[TMP199]] to double**
13758 // CHECK10-NEXT:    store double* [[TMP183]], double** [[TMP200]], align 8
13759 // CHECK10-NEXT:    [[TMP201:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS65]], i32 0, i32 3
13760 // CHECK10-NEXT:    [[TMP202:%.*]] = bitcast i8** [[TMP201]] to double**
13761 // CHECK10-NEXT:    store double* [[TMP183]], double** [[TMP202]], align 8
13762 // CHECK10-NEXT:    [[TMP203:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS66]], i64 0, i64 3
13763 // CHECK10-NEXT:    store i8* null, i8** [[TMP203]], align 8
13764 // CHECK10-NEXT:    [[TMP204:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS64]], i32 0, i32 0
13765 // CHECK10-NEXT:    [[TMP205:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS65]], i32 0, i32 0
13766 // CHECK10-NEXT:    [[TMP206:%.*]] = load i32, i32* [[N]], align 4
13767 // CHECK10-NEXT:    store i32 [[TMP206]], i32* [[DOTCAPTURE_EXPR_68]], align 4
13768 // CHECK10-NEXT:    [[TMP207:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_68]], align 4
13769 // CHECK10-NEXT:    [[SUB70:%.*]] = sub nsw i32 [[TMP207]], 0
13770 // CHECK10-NEXT:    [[DIV71:%.*]] = sdiv i32 [[SUB70]], 1
13771 // CHECK10-NEXT:    [[SUB72:%.*]] = sub nsw i32 [[DIV71]], 1
13772 // CHECK10-NEXT:    store i32 [[SUB72]], i32* [[DOTCAPTURE_EXPR_69]], align 4
13773 // CHECK10-NEXT:    [[TMP208:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_69]], align 4
13774 // CHECK10-NEXT:    [[ADD73:%.*]] = add nsw i32 [[TMP208]], 1
13775 // CHECK10-NEXT:    [[TMP209:%.*]] = zext i32 [[ADD73]] to i64
13776 // CHECK10-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP209]])
13777 // CHECK10-NEXT:    [[TMP210:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l535.region_id, i32 4, i8** [[TMP204]], i8** [[TMP205]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
13778 // CHECK10-NEXT:    [[TMP211:%.*]] = icmp ne i32 [[TMP210]], 0
13779 // CHECK10-NEXT:    br i1 [[TMP211]], label [[OMP_OFFLOAD_FAILED74:%.*]], label [[OMP_OFFLOAD_CONT75:%.*]]
13780 // CHECK10:       omp_offload.failed74:
13781 // CHECK10-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l535(i64 [[TMP180]], double* [[TMP181]], double* [[TMP182]], double* [[TMP183]]) #[[ATTR2]]
13782 // CHECK10-NEXT:    br label [[OMP_OFFLOAD_CONT75]]
13783 // CHECK10:       omp_offload.cont75:
13784 // CHECK10-NEXT:    [[TMP212:%.*]] = load i32, i32* [[CH]], align 4
13785 // CHECK10-NEXT:    [[CONV77:%.*]] = bitcast i64* [[CH_CASTED76]] to i32*
13786 // CHECK10-NEXT:    store i32 [[TMP212]], i32* [[CONV77]], align 4
13787 // CHECK10-NEXT:    [[TMP213:%.*]] = load i64, i64* [[CH_CASTED76]], align 8
13788 // CHECK10-NEXT:    [[TMP214:%.*]] = load i32, i32* [[N]], align 4
13789 // CHECK10-NEXT:    [[CONV79:%.*]] = bitcast i64* [[N_CASTED78]] to i32*
13790 // CHECK10-NEXT:    store i32 [[TMP214]], i32* [[CONV79]], align 4
13791 // CHECK10-NEXT:    [[TMP215:%.*]] = load i64, i64* [[N_CASTED78]], align 8
13792 // CHECK10-NEXT:    [[TMP216:%.*]] = load double*, double** [[A]], align 8
13793 // CHECK10-NEXT:    [[TMP217:%.*]] = load double*, double** [[B]], align 8
13794 // CHECK10-NEXT:    [[TMP218:%.*]] = load double*, double** [[C]], align 8
13795 // CHECK10-NEXT:    [[TMP219:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 0
13796 // CHECK10-NEXT:    [[TMP220:%.*]] = bitcast i8** [[TMP219]] to i64*
13797 // CHECK10-NEXT:    store i64 [[TMP213]], i64* [[TMP220]], align 8
13798 // CHECK10-NEXT:    [[TMP221:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 0
13799 // CHECK10-NEXT:    [[TMP222:%.*]] = bitcast i8** [[TMP221]] to i64*
13800 // CHECK10-NEXT:    store i64 [[TMP213]], i64* [[TMP222]], align 8
13801 // CHECK10-NEXT:    [[TMP223:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS82]], i64 0, i64 0
13802 // CHECK10-NEXT:    store i8* null, i8** [[TMP223]], align 8
13803 // CHECK10-NEXT:    [[TMP224:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 1
13804 // CHECK10-NEXT:    [[TMP225:%.*]] = bitcast i8** [[TMP224]] to i64*
13805 // CHECK10-NEXT:    store i64 [[TMP215]], i64* [[TMP225]], align 8
13806 // CHECK10-NEXT:    [[TMP226:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 1
13807 // CHECK10-NEXT:    [[TMP227:%.*]] = bitcast i8** [[TMP226]] to i64*
13808 // CHECK10-NEXT:    store i64 [[TMP215]], i64* [[TMP227]], align 8
13809 // CHECK10-NEXT:    [[TMP228:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS82]], i64 0, i64 1
13810 // CHECK10-NEXT:    store i8* null, i8** [[TMP228]], align 8
13811 // CHECK10-NEXT:    [[TMP229:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 2
13812 // CHECK10-NEXT:    [[TMP230:%.*]] = bitcast i8** [[TMP229]] to double**
13813 // CHECK10-NEXT:    store double* [[TMP216]], double** [[TMP230]], align 8
13814 // CHECK10-NEXT:    [[TMP231:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 2
13815 // CHECK10-NEXT:    [[TMP232:%.*]] = bitcast i8** [[TMP231]] to double**
13816 // CHECK10-NEXT:    store double* [[TMP216]], double** [[TMP232]], align 8
13817 // CHECK10-NEXT:    [[TMP233:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS82]], i64 0, i64 2
13818 // CHECK10-NEXT:    store i8* null, i8** [[TMP233]], align 8
13819 // CHECK10-NEXT:    [[TMP234:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 3
13820 // CHECK10-NEXT:    [[TMP235:%.*]] = bitcast i8** [[TMP234]] to double**
13821 // CHECK10-NEXT:    store double* [[TMP217]], double** [[TMP235]], align 8
13822 // CHECK10-NEXT:    [[TMP236:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 3
13823 // CHECK10-NEXT:    [[TMP237:%.*]] = bitcast i8** [[TMP236]] to double**
13824 // CHECK10-NEXT:    store double* [[TMP217]], double** [[TMP237]], align 8
13825 // CHECK10-NEXT:    [[TMP238:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS82]], i64 0, i64 3
13826 // CHECK10-NEXT:    store i8* null, i8** [[TMP238]], align 8
13827 // CHECK10-NEXT:    [[TMP239:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 4
13828 // CHECK10-NEXT:    [[TMP240:%.*]] = bitcast i8** [[TMP239]] to double**
13829 // CHECK10-NEXT:    store double* [[TMP218]], double** [[TMP240]], align 8
13830 // CHECK10-NEXT:    [[TMP241:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 4
13831 // CHECK10-NEXT:    [[TMP242:%.*]] = bitcast i8** [[TMP241]] to double**
13832 // CHECK10-NEXT:    store double* [[TMP218]], double** [[TMP242]], align 8
13833 // CHECK10-NEXT:    [[TMP243:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS82]], i64 0, i64 4
13834 // CHECK10-NEXT:    store i8* null, i8** [[TMP243]], align 8
13835 // CHECK10-NEXT:    [[TMP244:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 0
13836 // CHECK10-NEXT:    [[TMP245:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 0
13837 // CHECK10-NEXT:    [[TMP246:%.*]] = load i32, i32* [[N]], align 4
13838 // CHECK10-NEXT:    store i32 [[TMP246]], i32* [[DOTCAPTURE_EXPR_84]], align 4
13839 // CHECK10-NEXT:    [[TMP247:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_84]], align 4
13840 // CHECK10-NEXT:    [[SUB86:%.*]] = sub nsw i32 [[TMP247]], 0
13841 // CHECK10-NEXT:    [[DIV87:%.*]] = sdiv i32 [[SUB86]], 1
13842 // CHECK10-NEXT:    [[SUB88:%.*]] = sub nsw i32 [[DIV87]], 1
13843 // CHECK10-NEXT:    store i32 [[SUB88]], i32* [[DOTCAPTURE_EXPR_85]], align 4
13844 // CHECK10-NEXT:    [[TMP248:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_85]], align 4
13845 // CHECK10-NEXT:    [[ADD89:%.*]] = add nsw i32 [[TMP248]], 1
13846 // CHECK10-NEXT:    [[TMP249:%.*]] = zext i32 [[ADD89]] to i64
13847 // CHECK10-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP249]])
13848 // CHECK10-NEXT:    [[TMP250:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l561.region_id, i32 5, i8** [[TMP244]], i8** [[TMP245]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.24, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.25, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
13849 // CHECK10-NEXT:    [[TMP251:%.*]] = icmp ne i32 [[TMP250]], 0
13850 // CHECK10-NEXT:    br i1 [[TMP251]], label [[OMP_OFFLOAD_FAILED90:%.*]], label [[OMP_OFFLOAD_CONT91:%.*]]
13851 // CHECK10:       omp_offload.failed90:
13852 // CHECK10-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l561(i64 [[TMP213]], i64 [[TMP215]], double* [[TMP216]], double* [[TMP217]], double* [[TMP218]]) #[[ATTR2]]
13853 // CHECK10-NEXT:    br label [[OMP_OFFLOAD_CONT91]]
13854 // CHECK10:       omp_offload.cont91:
13855 // CHECK10-NEXT:    [[CALL:%.*]] = call signext i32 @_Z5tmainIiET_v()
13856 // CHECK10-NEXT:    ret i32 [[CALL]]
13857 //
13858 //
13859 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l368
13860 // CHECK10-SAME: (i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1:[0-9]+]] {
13861 // CHECK10-NEXT:  entry:
13862 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
13863 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
13864 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
13865 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
13866 // CHECK10-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
13867 // CHECK10-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
13868 // CHECK10-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
13869 // CHECK10-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
13870 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
13871 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[CONV]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
13872 // CHECK10-NEXT:    ret void
13873 //
13874 //
13875 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined.
13876 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
13877 // CHECK10-NEXT:  entry:
13878 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
13879 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
13880 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
13881 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
13882 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
13883 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
13884 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
13885 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
13886 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
13887 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
13888 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
13889 // CHECK10-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
13890 // CHECK10-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
13891 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13892 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13893 // CHECK10-NEXT:    [[I3:%.*]] = alloca i32, align 4
13894 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
13895 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
13896 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
13897 // CHECK10-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
13898 // CHECK10-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
13899 // CHECK10-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
13900 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
13901 // CHECK10-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
13902 // CHECK10-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
13903 // CHECK10-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
13904 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
13905 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
13906 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
13907 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
13908 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
13909 // CHECK10-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
13910 // CHECK10-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
13911 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
13912 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
13913 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
13914 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
13915 // CHECK10:       omp.precond.then:
13916 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
13917 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13918 // CHECK10-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
13919 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
13920 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
13921 // CHECK10-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
13922 // CHECK10-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
13923 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
13924 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
13925 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13926 // CHECK10-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
13927 // CHECK10-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
13928 // CHECK10:       cond.true:
13929 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13930 // CHECK10-NEXT:    br label [[COND_END:%.*]]
13931 // CHECK10:       cond.false:
13932 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
13933 // CHECK10-NEXT:    br label [[COND_END]]
13934 // CHECK10:       cond.end:
13935 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
13936 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
13937 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
13938 // CHECK10-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
13939 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
13940 // CHECK10:       omp.inner.for.cond:
13941 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !17
13942 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !17
13943 // CHECK10-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
13944 // CHECK10-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13945 // CHECK10:       omp.inner.for.body:
13946 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !17
13947 // CHECK10-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
13948 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !17
13949 // CHECK10-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
13950 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !17
13951 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
13952 // CHECK10:       omp.inner.for.inc:
13953 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !17
13954 // CHECK10-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !17
13955 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
13956 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !17
13957 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP18:![0-9]+]]
13958 // CHECK10:       omp.inner.for.end:
13959 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
13960 // CHECK10:       omp.loop.exit:
13961 // CHECK10-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
13962 // CHECK10-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
13963 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
13964 // CHECK10-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
13965 // CHECK10-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
13966 // CHECK10-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
13967 // CHECK10:       .omp.final.then:
13968 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
13969 // CHECK10-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
13970 // CHECK10-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
13971 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
13972 // CHECK10-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
13973 // CHECK10-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
13974 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
13975 // CHECK10:       .omp.final.done:
13976 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
13977 // CHECK10:       omp.precond.end:
13978 // CHECK10-NEXT:    ret void
13979 //
13980 //
13981 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..1
13982 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
13983 // CHECK10-NEXT:  entry:
13984 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
13985 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
13986 // CHECK10-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
13987 // CHECK10-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
13988 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
13989 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
13990 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
13991 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
13992 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
13993 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
13994 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
13995 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
13996 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
13997 // CHECK10-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
13998 // CHECK10-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
13999 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
14000 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
14001 // CHECK10-NEXT:    [[I4:%.*]] = alloca i32, align 4
14002 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
14003 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
14004 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
14005 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
14006 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
14007 // CHECK10-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
14008 // CHECK10-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
14009 // CHECK10-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
14010 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
14011 // CHECK10-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
14012 // CHECK10-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
14013 // CHECK10-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
14014 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
14015 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
14016 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14017 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
14018 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
14019 // CHECK10-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
14020 // CHECK10-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
14021 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
14022 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14023 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
14024 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
14025 // CHECK10:       omp.precond.then:
14026 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
14027 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14028 // CHECK10-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
14029 // CHECK10-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
14030 // CHECK10-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
14031 // CHECK10-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
14032 // CHECK10-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
14033 // CHECK10-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
14034 // CHECK10-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
14035 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
14036 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
14037 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
14038 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
14039 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
14040 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
14041 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14042 // CHECK10-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
14043 // CHECK10-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
14044 // CHECK10:       cond.true:
14045 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14046 // CHECK10-NEXT:    br label [[COND_END:%.*]]
14047 // CHECK10:       cond.false:
14048 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
14049 // CHECK10-NEXT:    br label [[COND_END]]
14050 // CHECK10:       cond.end:
14051 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
14052 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
14053 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
14054 // CHECK10-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
14055 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
14056 // CHECK10:       omp.inner.for.cond:
14057 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
14058 // CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !21
14059 // CHECK10-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
14060 // CHECK10-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14061 // CHECK10:       omp.inner.for.body:
14062 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
14063 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
14064 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
14065 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !21
14066 // CHECK10-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !21
14067 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !21
14068 // CHECK10-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
14069 // CHECK10-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i64 [[IDXPROM]]
14070 // CHECK10-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !21
14071 // CHECK10-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !21
14072 // CHECK10-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !21
14073 // CHECK10-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
14074 // CHECK10-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM7]]
14075 // CHECK10-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX8]], align 8, !llvm.access.group !21
14076 // CHECK10-NEXT:    [[ADD9:%.*]] = fadd double [[TMP22]], [[TMP25]]
14077 // CHECK10-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !21
14078 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !21
14079 // CHECK10-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
14080 // CHECK10-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM10]]
14081 // CHECK10-NEXT:    store double [[ADD9]], double* [[ARRAYIDX11]], align 8, !llvm.access.group !21
14082 // CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
14083 // CHECK10:       omp.body.continue:
14084 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
14085 // CHECK10:       omp.inner.for.inc:
14086 // CHECK10-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
14087 // CHECK10-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP28]], 1
14088 // CHECK10-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
14089 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]]
14090 // CHECK10:       omp.inner.for.end:
14091 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
14092 // CHECK10:       omp.loop.exit:
14093 // CHECK10-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
14094 // CHECK10-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
14095 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
14096 // CHECK10-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
14097 // CHECK10-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
14098 // CHECK10-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
14099 // CHECK10:       .omp.final.then:
14100 // CHECK10-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14101 // CHECK10-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP33]], 0
14102 // CHECK10-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
14103 // CHECK10-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
14104 // CHECK10-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
14105 // CHECK10-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
14106 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
14107 // CHECK10:       .omp.final.done:
14108 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
14109 // CHECK10:       omp.precond.end:
14110 // CHECK10-NEXT:    ret void
14111 //
14112 //
14113 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l407
14114 // CHECK10-SAME: (i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1]] {
14115 // CHECK10-NEXT:  entry:
14116 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
14117 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
14118 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
14119 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
14120 // CHECK10-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
14121 // CHECK10-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
14122 // CHECK10-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
14123 // CHECK10-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
14124 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
14125 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32* [[CONV]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
14126 // CHECK10-NEXT:    ret void
14127 //
14128 //
14129 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..2
14130 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
14131 // CHECK10-NEXT:  entry:
14132 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
14133 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
14134 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
14135 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
14136 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
14137 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
14138 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
14139 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
14140 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
14141 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
14142 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
14143 // CHECK10-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
14144 // CHECK10-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
14145 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
14146 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
14147 // CHECK10-NEXT:    [[I3:%.*]] = alloca i32, align 4
14148 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
14149 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
14150 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
14151 // CHECK10-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
14152 // CHECK10-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
14153 // CHECK10-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
14154 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
14155 // CHECK10-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
14156 // CHECK10-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
14157 // CHECK10-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
14158 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
14159 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
14160 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14161 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
14162 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
14163 // CHECK10-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
14164 // CHECK10-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
14165 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
14166 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14167 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
14168 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
14169 // CHECK10:       omp.precond.then:
14170 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
14171 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14172 // CHECK10-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
14173 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
14174 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
14175 // CHECK10-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
14176 // CHECK10-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
14177 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
14178 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
14179 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14180 // CHECK10-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
14181 // CHECK10-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
14182 // CHECK10:       cond.true:
14183 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14184 // CHECK10-NEXT:    br label [[COND_END:%.*]]
14185 // CHECK10:       cond.false:
14186 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
14187 // CHECK10-NEXT:    br label [[COND_END]]
14188 // CHECK10:       cond.end:
14189 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
14190 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
14191 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
14192 // CHECK10-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
14193 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
14194 // CHECK10:       omp.inner.for.cond:
14195 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
14196 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26
14197 // CHECK10-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
14198 // CHECK10-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14199 // CHECK10:       omp.inner.for.body:
14200 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !26
14201 // CHECK10-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
14202 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26
14203 // CHECK10-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
14204 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !26
14205 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
14206 // CHECK10:       omp.inner.for.inc:
14207 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
14208 // CHECK10-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !26
14209 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
14210 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
14211 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]]
14212 // CHECK10:       omp.inner.for.end:
14213 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
14214 // CHECK10:       omp.loop.exit:
14215 // CHECK10-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
14216 // CHECK10-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
14217 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
14218 // CHECK10-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
14219 // CHECK10-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
14220 // CHECK10-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
14221 // CHECK10:       .omp.final.then:
14222 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14223 // CHECK10-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
14224 // CHECK10-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
14225 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
14226 // CHECK10-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
14227 // CHECK10-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
14228 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
14229 // CHECK10:       .omp.final.done:
14230 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
14231 // CHECK10:       omp.precond.end:
14232 // CHECK10-NEXT:    ret void
14233 //
14234 //
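// Note (editorial, inferred from the checks below): .omp_outlined..3 appears to be the
// 'parallel for' worker outlined from the preceding distribute loop. It receives the
// distribute chunk as 64-bit previous lower/upper bounds, truncates them to i32, runs a
// statically scheduled worksharing loop (__kmpc_for_static_init_4 with schedule kind 34,
// i.e. static in the runtime's sched_type enum), and computes a[i] = b[i] + c[i].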
14235 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..3
14236 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
14237 // CHECK10-NEXT:  entry:
14238 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
14239 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
14240 // CHECK10-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
14241 // CHECK10-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
14242 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
14243 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
14244 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
14245 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
14246 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
14247 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
14248 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
14249 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
14250 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
14251 // CHECK10-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
14252 // CHECK10-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
14253 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
14254 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
14255 // CHECK10-NEXT:    [[I4:%.*]] = alloca i32, align 4
14256 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
14257 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
14258 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
14259 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
14260 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
14261 // CHECK10-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
14262 // CHECK10-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
14263 // CHECK10-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
14264 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
14265 // CHECK10-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
14266 // CHECK10-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
14267 // CHECK10-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
14268 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
14269 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
14270 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14271 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
14272 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
14273 // CHECK10-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
14274 // CHECK10-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
14275 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
14276 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14277 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
14278 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
14279 // CHECK10:       omp.precond.then:
14280 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
14281 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14282 // CHECK10-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
14283 // CHECK10-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
14284 // CHECK10-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
14285 // CHECK10-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
14286 // CHECK10-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
14287 // CHECK10-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
14288 // CHECK10-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
14289 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
14290 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
14291 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
14292 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
14293 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
14294 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
14295 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14296 // CHECK10-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
14297 // CHECK10-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
14298 // CHECK10:       cond.true:
14299 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14300 // CHECK10-NEXT:    br label [[COND_END:%.*]]
14301 // CHECK10:       cond.false:
14302 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
14303 // CHECK10-NEXT:    br label [[COND_END]]
14304 // CHECK10:       cond.end:
14305 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
14306 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
14307 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
14308 // CHECK10-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
14309 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
14310 // CHECK10:       omp.inner.for.cond:
14311 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
14312 // CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !29
14313 // CHECK10-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
14314 // CHECK10-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14315 // CHECK10:       omp.inner.for.body:
14316 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
14317 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
14318 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
14319 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !29
14320 // CHECK10-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !29
14321 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !29
14322 // CHECK10-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
14323 // CHECK10-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i64 [[IDXPROM]]
14324 // CHECK10-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !29
14325 // CHECK10-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !29
14326 // CHECK10-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !29
14327 // CHECK10-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
14328 // CHECK10-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM7]]
14329 // CHECK10-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX8]], align 8, !llvm.access.group !29
14330 // CHECK10-NEXT:    [[ADD9:%.*]] = fadd double [[TMP22]], [[TMP25]]
14331 // CHECK10-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !29
14332 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !29
14333 // CHECK10-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
14334 // CHECK10-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM10]]
14335 // CHECK10-NEXT:    store double [[ADD9]], double* [[ARRAYIDX11]], align 8, !llvm.access.group !29
14336 // CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
14337 // CHECK10:       omp.body.continue:
14338 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
14339 // CHECK10:       omp.inner.for.inc:
14340 // CHECK10-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
14341 // CHECK10-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP28]], 1
14342 // CHECK10-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
14343 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]]
14344 // CHECK10:       omp.inner.for.end:
14345 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
14346 // CHECK10:       omp.loop.exit:
14347 // CHECK10-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
14348 // CHECK10-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
14349 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
14350 // CHECK10-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
14351 // CHECK10-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
14352 // CHECK10-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
14353 // CHECK10:       .omp.final.then:
14354 // CHECK10-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14355 // CHECK10-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP33]], 0
14356 // CHECK10-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
14357 // CHECK10-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
14358 // CHECK10-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
14359 // CHECK10-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
14360 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
14361 // CHECK10:       .omp.final.done:
14362 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
14363 // CHECK10:       omp.precond.end:
14364 // CHECK10-NEXT:    ret void
14365 //
14366 //
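// Note (editorial): target region entry point for the construct at main:446. It stores the
// i64-packed ch and n arguments, reinterprets them as i32*, and forwards ch, n, a, b and c
// to __kmpc_fork_teams, which invokes the teams outlined function .omp_outlined..6 below.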
14367 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l446
14368 // CHECK10-SAME: (i64 [[CH:%.*]], i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1]] {
14369 // CHECK10-NEXT:  entry:
14370 // CHECK10-NEXT:    [[CH_ADDR:%.*]] = alloca i64, align 8
14371 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
14372 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
14373 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
14374 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
14375 // CHECK10-NEXT:    store i64 [[CH]], i64* [[CH_ADDR]], align 8
14376 // CHECK10-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
14377 // CHECK10-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
14378 // CHECK10-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
14379 // CHECK10-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
14380 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
14381 // CHECK10-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
14382 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
14383 // CHECK10-NEXT:    ret void
14384 //
14385 //
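// Note (editorial): .omp_outlined..6 is the teams (distribute) outlined function for the
// l446 region. The chunk loaded from 'ch' is passed as the last argument of
// __kmpc_for_static_init_4 together with schedule kind 91 (distribute static chunked),
// which is consistent with a dist_schedule(static, ch) clause. Because the distribute loop
// is chunked, the increment block re-advances .omp.comb.lb/.omp.comb.ub by the stride and
// re-clamps the upper bound against .capture_expr.1 on every iteration before forking
// .omp_outlined..7 for the enclosed 'parallel for'.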
14386 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..6
14387 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
14388 // CHECK10-NEXT:  entry:
14389 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
14390 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
14391 // CHECK10-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 8
14392 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
14393 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
14394 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
14395 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
14396 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
14397 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
14398 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
14399 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
14400 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
14401 // CHECK10-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
14402 // CHECK10-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
14403 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
14404 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
14405 // CHECK10-NEXT:    [[I3:%.*]] = alloca i32, align 4
14406 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
14407 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
14408 // CHECK10-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 8
14409 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
14410 // CHECK10-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
14411 // CHECK10-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
14412 // CHECK10-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
14413 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
14414 // CHECK10-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
14415 // CHECK10-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 8
14416 // CHECK10-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 8
14417 // CHECK10-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 8
14418 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP1]], align 4
14419 // CHECK10-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
14420 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14421 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP6]], 0
14422 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
14423 // CHECK10-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
14424 // CHECK10-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
14425 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
14426 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14427 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP7]]
14428 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
14429 // CHECK10:       omp.precond.then:
14430 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
14431 // CHECK10-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14432 // CHECK10-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_COMB_UB]], align 4
14433 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
14434 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
14435 // CHECK10-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP0]], align 4
14436 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
14437 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
14438 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP9]])
14439 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
14440 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14441 // CHECK10-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
14442 // CHECK10-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
14443 // CHECK10:       cond.true:
14444 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14445 // CHECK10-NEXT:    br label [[COND_END:%.*]]
14446 // CHECK10:       cond.false:
14447 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
14448 // CHECK10-NEXT:    br label [[COND_END]]
14449 // CHECK10:       cond.end:
14450 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
14451 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
14452 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
14453 // CHECK10-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
14454 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
14455 // CHECK10:       omp.inner.for.cond:
14456 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
14457 // CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !32
14458 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], 1
14459 // CHECK10-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP17]], [[ADD]]
14460 // CHECK10-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14461 // CHECK10:       omp.inner.for.body:
14462 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !32
14463 // CHECK10-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
14464 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32
14465 // CHECK10-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
14466 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]]), !llvm.access.group !32
14467 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
14468 // CHECK10:       omp.inner.for.inc:
14469 // CHECK10-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
14470 // CHECK10-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !32
14471 // CHECK10-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
14472 // CHECK10-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
14473 // CHECK10-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !32
14474 // CHECK10-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !32
14475 // CHECK10-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
14476 // CHECK10-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !32
14477 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32
14478 // CHECK10-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !32
14479 // CHECK10-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP27]], [[TMP28]]
14480 // CHECK10-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32
14481 // CHECK10-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32
14482 // CHECK10-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !32
14483 // CHECK10-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[TMP29]], [[TMP30]]
14484 // CHECK10-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
14485 // CHECK10:       cond.true10:
14486 // CHECK10-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !32
14487 // CHECK10-NEXT:    br label [[COND_END12:%.*]]
14488 // CHECK10:       cond.false11:
14489 // CHECK10-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32
14490 // CHECK10-NEXT:    br label [[COND_END12]]
14491 // CHECK10:       cond.end12:
14492 // CHECK10-NEXT:    [[COND13:%.*]] = phi i32 [ [[TMP31]], [[COND_TRUE10]] ], [ [[TMP32]], [[COND_FALSE11]] ]
14493 // CHECK10-NEXT:    store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32
14494 // CHECK10-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !32
14495 // CHECK10-NEXT:    store i32 [[TMP33]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
14496 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]]
14497 // CHECK10:       omp.inner.for.end:
14498 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
14499 // CHECK10:       omp.loop.exit:
14500 // CHECK10-NEXT:    [[TMP34:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
14501 // CHECK10-NEXT:    [[TMP35:%.*]] = load i32, i32* [[TMP34]], align 4
14502 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP35]])
14503 // CHECK10-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
14504 // CHECK10-NEXT:    [[TMP37:%.*]] = icmp ne i32 [[TMP36]], 0
14505 // CHECK10-NEXT:    br i1 [[TMP37]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
14506 // CHECK10:       .omp.final.then:
14507 // CHECK10-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14508 // CHECK10-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP38]], 0
14509 // CHECK10-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
14510 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV15]], 1
14511 // CHECK10-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL]]
14512 // CHECK10-NEXT:    store i32 [[ADD16]], i32* [[I3]], align 4
14513 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
14514 // CHECK10:       .omp.final.done:
14515 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
14516 // CHECK10:       omp.precond.end:
14517 // CHECK10-NEXT:    ret void
14518 //
14519 //
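// Note (editorial): .omp_outlined..7 is the 'parallel for' worker forked from
// .omp_outlined..6; it follows the same pattern as .omp_outlined..3 (truncate the i64
// distribute bounds, static worksharing init with schedule kind 34, then a[i] = b[i] + c[i]).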
14520 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..7
14521 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
14522 // CHECK10-NEXT:  entry:
14523 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
14524 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
14525 // CHECK10-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
14526 // CHECK10-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
14527 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
14528 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
14529 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
14530 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
14531 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
14532 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
14533 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
14534 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
14535 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
14536 // CHECK10-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
14537 // CHECK10-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
14538 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
14539 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
14540 // CHECK10-NEXT:    [[I4:%.*]] = alloca i32, align 4
14541 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
14542 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
14543 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
14544 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
14545 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
14546 // CHECK10-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
14547 // CHECK10-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
14548 // CHECK10-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
14549 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
14550 // CHECK10-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
14551 // CHECK10-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
14552 // CHECK10-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
14553 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
14554 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
14555 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14556 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
14557 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
14558 // CHECK10-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
14559 // CHECK10-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
14560 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
14561 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14562 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
14563 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
14564 // CHECK10:       omp.precond.then:
14565 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
14566 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14567 // CHECK10-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
14568 // CHECK10-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
14569 // CHECK10-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
14570 // CHECK10-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
14571 // CHECK10-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
14572 // CHECK10-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
14573 // CHECK10-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
14574 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
14575 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
14576 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
14577 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
14578 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
14579 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
14580 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14581 // CHECK10-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
14582 // CHECK10-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
14583 // CHECK10:       cond.true:
14584 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14585 // CHECK10-NEXT:    br label [[COND_END:%.*]]
14586 // CHECK10:       cond.false:
14587 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
14588 // CHECK10-NEXT:    br label [[COND_END]]
14589 // CHECK10:       cond.end:
14590 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
14591 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
14592 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
14593 // CHECK10-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
14594 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
14595 // CHECK10:       omp.inner.for.cond:
14596 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
14597 // CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !35
14598 // CHECK10-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
14599 // CHECK10-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14600 // CHECK10:       omp.inner.for.body:
14601 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
14602 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
14603 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
14604 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !35
14605 // CHECK10-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !35
14606 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !35
14607 // CHECK10-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
14608 // CHECK10-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i64 [[IDXPROM]]
14609 // CHECK10-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !35
14610 // CHECK10-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !35
14611 // CHECK10-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !35
14612 // CHECK10-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
14613 // CHECK10-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM7]]
14614 // CHECK10-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX8]], align 8, !llvm.access.group !35
14615 // CHECK10-NEXT:    [[ADD9:%.*]] = fadd double [[TMP22]], [[TMP25]]
14616 // CHECK10-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !35
14617 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !35
14618 // CHECK10-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
14619 // CHECK10-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM10]]
14620 // CHECK10-NEXT:    store double [[ADD9]], double* [[ARRAYIDX11]], align 8, !llvm.access.group !35
14621 // CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
14622 // CHECK10:       omp.body.continue:
14623 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
14624 // CHECK10:       omp.inner.for.inc:
14625 // CHECK10-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
14626 // CHECK10-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP28]], 1
14627 // CHECK10-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
14628 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]]
14629 // CHECK10:       omp.inner.for.end:
14630 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
14631 // CHECK10:       omp.loop.exit:
14632 // CHECK10-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
14633 // CHECK10-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
14634 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
14635 // CHECK10-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
14636 // CHECK10-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
14637 // CHECK10-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
14638 // CHECK10:       .omp.final.then:
14639 // CHECK10-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14640 // CHECK10-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP33]], 0
14641 // CHECK10-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
14642 // CHECK10-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
14643 // CHECK10-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
14644 // CHECK10-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
14645 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
14646 // CHECK10:       .omp.final.done:
14647 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
14648 // CHECK10:       omp.precond.end:
14649 // CHECK10-NEXT:    ret void
14650 //
14651 //
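// Note (editorial): target region entry point for the construct at main:477. Unlike the
// l446 entry it receives no chunk argument; it forwards n, a, b and c to the teams outlined
// function .omp_outlined..10.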
14652 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l477
14653 // CHECK10-SAME: (i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1]] {
14654 // CHECK10-NEXT:  entry:
14655 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
14656 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
14657 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
14658 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
14659 // CHECK10-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
14660 // CHECK10-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
14661 // CHECK10-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
14662 // CHECK10-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
14663 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
14664 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32* [[CONV]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
14665 // CHECK10-NEXT:    ret void
14666 //
14667 //
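// Note (editorial): .omp_outlined..10 is the teams (distribute) outlined function for the
// l477 region. Here __kmpc_for_static_init_4 is called with schedule kind 92 (distribute
// static, unchunked) and a chunk of 1, so each team gets one contiguous chunk and the
// increment block only adds the stride to .omp.iv before forking .omp_outlined..11 for the
// enclosed 'parallel for'.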
14668 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..10
14669 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
14670 // CHECK10-NEXT:  entry:
14671 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
14672 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
14673 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
14674 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
14675 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
14676 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
14677 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
14678 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
14679 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
14680 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
14681 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
14682 // CHECK10-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
14683 // CHECK10-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
14684 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
14685 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
14686 // CHECK10-NEXT:    [[I3:%.*]] = alloca i32, align 4
14687 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
14688 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
14689 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
14690 // CHECK10-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
14691 // CHECK10-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
14692 // CHECK10-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
14693 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
14694 // CHECK10-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
14695 // CHECK10-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
14696 // CHECK10-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
14697 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
14698 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
14699 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14700 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
14701 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
14702 // CHECK10-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
14703 // CHECK10-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
14704 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
14705 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14706 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
14707 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
14708 // CHECK10:       omp.precond.then:
14709 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
14710 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14711 // CHECK10-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
14712 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
14713 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
14714 // CHECK10-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
14715 // CHECK10-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
14716 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
14717 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
14718 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14719 // CHECK10-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
14720 // CHECK10-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
14721 // CHECK10:       cond.true:
14722 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14723 // CHECK10-NEXT:    br label [[COND_END:%.*]]
14724 // CHECK10:       cond.false:
14725 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
14726 // CHECK10-NEXT:    br label [[COND_END]]
14727 // CHECK10:       cond.end:
14728 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
14729 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
14730 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
14731 // CHECK10-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
14732 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
14733 // CHECK10:       omp.inner.for.cond:
14734 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
14735 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !38
14736 // CHECK10-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
14737 // CHECK10-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14738 // CHECK10:       omp.inner.for.body:
14739 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !38
14740 // CHECK10-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
14741 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !38
14742 // CHECK10-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
14743 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !38
14744 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
14745 // CHECK10:       omp.inner.for.inc:
14746 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
14747 // CHECK10-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !38
14748 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
14749 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
14750 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP39:![0-9]+]]
14751 // CHECK10:       omp.inner.for.end:
14752 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
14753 // CHECK10:       omp.loop.exit:
14754 // CHECK10-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
14755 // CHECK10-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
14756 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
14757 // CHECK10-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
14758 // CHECK10-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
14759 // CHECK10-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
14760 // CHECK10:       .omp.final.then:
14761 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14762 // CHECK10-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
14763 // CHECK10-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
14764 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
14765 // CHECK10-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
14766 // CHECK10-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
14767 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
14768 // CHECK10:       .omp.final.done:
14769 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
14770 // CHECK10:       omp.precond.end:
14771 // CHECK10-NEXT:    ret void
14772 //
14773 //
14774 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..11
14775 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
14776 // CHECK10-NEXT:  entry:
14777 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
14778 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
14779 // CHECK10-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
14780 // CHECK10-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
14781 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
14782 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
14783 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
14784 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
14785 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
14786 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
14787 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
14788 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
14789 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
14790 // CHECK10-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
14791 // CHECK10-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
14792 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
14793 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
14794 // CHECK10-NEXT:    [[I4:%.*]] = alloca i32, align 4
14795 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
14796 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
14797 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
14798 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
14799 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
14800 // CHECK10-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
14801 // CHECK10-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
14802 // CHECK10-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
14803 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
14804 // CHECK10-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
14805 // CHECK10-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
14806 // CHECK10-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
14807 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
14808 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
14809 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14810 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
14811 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
14812 // CHECK10-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
14813 // CHECK10-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
14814 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
14815 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14816 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
14817 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
14818 // CHECK10:       omp.precond.then:
14819 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
14820 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14821 // CHECK10-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
14822 // CHECK10-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
14823 // CHECK10-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
14824 // CHECK10-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
14825 // CHECK10-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
14826 // CHECK10-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
14827 // CHECK10-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
14828 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
14829 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
14830 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
14831 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
14832 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
14833 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
14834 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14835 // CHECK10-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
14836 // CHECK10-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
14837 // CHECK10:       cond.true:
14838 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14839 // CHECK10-NEXT:    br label [[COND_END:%.*]]
14840 // CHECK10:       cond.false:
14841 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
14842 // CHECK10-NEXT:    br label [[COND_END]]
14843 // CHECK10:       cond.end:
14844 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
14845 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
14846 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
14847 // CHECK10-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
14848 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
14849 // CHECK10:       omp.inner.for.cond:
14850 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
14851 // CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !41
14852 // CHECK10-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
14853 // CHECK10-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14854 // CHECK10:       omp.inner.for.body:
14855 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
14856 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
14857 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
14858 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !41
14859 // CHECK10-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !41
14860 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !41
14861 // CHECK10-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
14862 // CHECK10-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i64 [[IDXPROM]]
14863 // CHECK10-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !41
14864 // CHECK10-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !41
14865 // CHECK10-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !41
14866 // CHECK10-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
14867 // CHECK10-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM7]]
14868 // CHECK10-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX8]], align 8, !llvm.access.group !41
14869 // CHECK10-NEXT:    [[ADD9:%.*]] = fadd double [[TMP22]], [[TMP25]]
14870 // CHECK10-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !41
14871 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !41
14872 // CHECK10-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
14873 // CHECK10-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM10]]
14874 // CHECK10-NEXT:    store double [[ADD9]], double* [[ARRAYIDX11]], align 8, !llvm.access.group !41
14875 // CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
14876 // CHECK10:       omp.body.continue:
14877 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
14878 // CHECK10:       omp.inner.for.inc:
14879 // CHECK10-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
14880 // CHECK10-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP28]], 1
14881 // CHECK10-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
14882 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP42:![0-9]+]]
14883 // CHECK10:       omp.inner.for.end:
14884 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
14885 // CHECK10:       omp.loop.exit:
14886 // CHECK10-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
14887 // CHECK10-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
14888 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
14889 // CHECK10-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
14890 // CHECK10-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
14891 // CHECK10-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
14892 // CHECK10:       .omp.final.then:
14893 // CHECK10-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14894 // CHECK10-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP33]], 0
14895 // CHECK10-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
14896 // CHECK10-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
14897 // CHECK10-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
14898 // CHECK10-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
14899 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
14900 // CHECK10:       .omp.final.done:
14901 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
14902 // CHECK10:       omp.precond.end:
14903 // CHECK10-NEXT:    ret void
14904 //
14905 //
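// Note (editorial): target region entry point for the construct at main:505. Like the l446
// entry it forwards ch and n (as i32* views of their i64 slots) plus a, b and c to
// __kmpc_fork_teams, here invoking .omp_outlined..14.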
14906 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l505
14907 // CHECK10-SAME: (i64 [[CH:%.*]], i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1]] {
14908 // CHECK10-NEXT:  entry:
14909 // CHECK10-NEXT:    [[CH_ADDR:%.*]] = alloca i64, align 8
14910 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
14911 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
14912 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
14913 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
14914 // CHECK10-NEXT:    store i64 [[CH]], i64* [[CH_ADDR]], align 8
14915 // CHECK10-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
14916 // CHECK10-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
14917 // CHECK10-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
14918 // CHECK10-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
14919 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
14920 // CHECK10-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
14921 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
14922 // CHECK10-NEXT:    ret void
14923 //
14924 //
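// Note (editorial, inferred): .omp_outlined..14 is the teams (distribute) outlined function
// for the l505 region. The value of 'ch' is captured into .capture_expr. and an i64 slot
// (.capture_expr._casted) is allocated, presumably so the chunk can be forwarded to the
// enclosed 'parallel for' (e.g. for a schedule(static, ch) clause), while the distribute
// loop itself uses schedule kind 92 with a chunk of 1.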
14925 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..14
14926 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
14927 // CHECK10-NEXT:  entry:
14928 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
14929 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
14930 // CHECK10-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 8
14931 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
14932 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
14933 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
14934 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
14935 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
14936 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
14937 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
14938 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
14939 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
14940 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
14941 // CHECK10-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
14942 // CHECK10-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
14943 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
14944 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
14945 // CHECK10-NEXT:    [[I4:%.*]] = alloca i32, align 4
14946 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
14947 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
14948 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
14949 // CHECK10-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 8
14950 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
14951 // CHECK10-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
14952 // CHECK10-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
14953 // CHECK10-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
14954 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
14955 // CHECK10-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
14956 // CHECK10-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 8
14957 // CHECK10-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 8
14958 // CHECK10-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 8
14959 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
14960 // CHECK10-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
14961 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
14962 // CHECK10-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
14963 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14964 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
14965 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
14966 // CHECK10-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
14967 // CHECK10-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
14968 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
14969 // CHECK10-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14970 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
14971 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
14972 // CHECK10:       omp.precond.then:
14973 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
14974 // CHECK10-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
14975 // CHECK10-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
14976 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
14977 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
14978 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
14979 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
14980 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
14981 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
14982 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
14983 // CHECK10-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
14984 // CHECK10-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
14985 // CHECK10:       cond.true:
14986 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
14987 // CHECK10-NEXT:    br label [[COND_END:%.*]]
14988 // CHECK10:       cond.false:
14989 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
14990 // CHECK10-NEXT:    br label [[COND_END]]
14991 // CHECK10:       cond.end:
14992 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
14993 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
14994 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
14995 // CHECK10-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
14996 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
14997 // CHECK10:       omp.inner.for.cond:
14998 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
14999 // CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !44
15000 // CHECK10-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
15001 // CHECK10-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15002 // CHECK10:       omp.inner.for.body:
15003 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !44
15004 // CHECK10-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
15005 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !44
15006 // CHECK10-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
15007 // CHECK10-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !44
15008 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
15009 // CHECK10-NEXT:    store i32 [[TMP23]], i32* [[CONV]], align 4, !llvm.access.group !44
15010 // CHECK10-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !44
15011 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**, i64)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]], i64 [[TMP24]]), !llvm.access.group !44
15012 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
15013 // CHECK10:       omp.inner.for.inc:
15014 // CHECK10-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
15015 // CHECK10-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !44
15016 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
15017 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
15018 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP45:![0-9]+]]
15019 // CHECK10:       omp.inner.for.end:
15020 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
15021 // CHECK10:       omp.loop.exit:
15022 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
15023 // CHECK10-NEXT:    [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
15024 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP28]])
15025 // CHECK10-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
15026 // CHECK10-NEXT:    [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
15027 // CHECK10-NEXT:    br i1 [[TMP30]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
15028 // CHECK10:       .omp.final.then:
15029 // CHECK10-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15030 // CHECK10-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP31]], 0
15031 // CHECK10-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
15032 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
15033 // CHECK10-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
15034 // CHECK10-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
15035 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
15036 // CHECK10:       .omp.final.done:
15037 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
15038 // CHECK10:       omp.precond.end:
15039 // CHECK10-NEXT:    ret void
15040 //
15041 //
15042 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..15
15043 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
15044 // CHECK10-NEXT:  entry:
15045 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
15046 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
15047 // CHECK10-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
15048 // CHECK10-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
15049 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
15050 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
15051 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
15052 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
15053 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
15054 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
15055 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
15056 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
15057 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
15058 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
15059 // CHECK10-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
15060 // CHECK10-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
15061 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
15062 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
15063 // CHECK10-NEXT:    [[I6:%.*]] = alloca i32, align 4
15064 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
15065 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
15066 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
15067 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
15068 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
15069 // CHECK10-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
15070 // CHECK10-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
15071 // CHECK10-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
15072 // CHECK10-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
15073 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
15074 // CHECK10-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
15075 // CHECK10-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
15076 // CHECK10-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
15077 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
15078 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
15079 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
15080 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15081 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
15082 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
15083 // CHECK10-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
15084 // CHECK10-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
15085 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
15086 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15087 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
15088 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
15089 // CHECK10:       omp.precond.then:
15090 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
15091 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
15092 // CHECK10-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
15093 // CHECK10-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
15094 // CHECK10-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
15095 // CHECK10-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
15096 // CHECK10-NEXT:    [[CONV5:%.*]] = trunc i64 [[TMP9]] to i32
15097 // CHECK10-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_LB]], align 4
15098 // CHECK10-NEXT:    store i32 [[CONV5]], i32* [[DOTOMP_UB]], align 4
15099 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
15100 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
15101 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32, i32* [[CONV]], align 8
15102 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
15103 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
15104 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP12]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP10]])
15105 // CHECK10-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
15106 // CHECK10:       omp.dispatch.cond:
15107 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
15108 // CHECK10-NEXT:    [[CONV7:%.*]] = sext i32 [[TMP13]] to i64
15109 // CHECK10-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
15110 // CHECK10-NEXT:    [[CMP8:%.*]] = icmp ugt i64 [[CONV7]], [[TMP14]]
15111 // CHECK10-NEXT:    br i1 [[CMP8]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
15112 // CHECK10:       cond.true:
15113 // CHECK10-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
15114 // CHECK10-NEXT:    br label [[COND_END:%.*]]
15115 // CHECK10:       cond.false:
15116 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
15117 // CHECK10-NEXT:    [[CONV9:%.*]] = sext i32 [[TMP16]] to i64
15118 // CHECK10-NEXT:    br label [[COND_END]]
15119 // CHECK10:       cond.end:
15120 // CHECK10-NEXT:    [[COND:%.*]] = phi i64 [ [[TMP15]], [[COND_TRUE]] ], [ [[CONV9]], [[COND_FALSE]] ]
15121 // CHECK10-NEXT:    [[CONV10:%.*]] = trunc i64 [[COND]] to i32
15122 // CHECK10-NEXT:    store i32 [[CONV10]], i32* [[DOTOMP_UB]], align 4
15123 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
15124 // CHECK10-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
15125 // CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
15126 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
15127 // CHECK10-NEXT:    [[CMP11:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
15128 // CHECK10-NEXT:    br i1 [[CMP11]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
15129 // CHECK10:       omp.dispatch.body:
15130 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
15131 // CHECK10:       omp.inner.for.cond:
15132 // CHECK10-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
15133 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !47
15134 // CHECK10-NEXT:    [[CMP12:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
15135 // CHECK10-NEXT:    br i1 [[CMP12]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15136 // CHECK10:       omp.inner.for.body:
15137 // CHECK10-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
15138 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP22]], 1
15139 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
15140 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[I6]], align 4, !llvm.access.group !47
15141 // CHECK10-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !47
15142 // CHECK10-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !47
15143 // CHECK10-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP24]] to i64
15144 // CHECK10-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM]]
15145 // CHECK10-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !47
15146 // CHECK10-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !47
15147 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !47
15148 // CHECK10-NEXT:    [[IDXPROM13:%.*]] = sext i32 [[TMP27]] to i64
15149 // CHECK10-NEXT:    [[ARRAYIDX14:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM13]]
15150 // CHECK10-NEXT:    [[TMP28:%.*]] = load double, double* [[ARRAYIDX14]], align 8, !llvm.access.group !47
15151 // CHECK10-NEXT:    [[ADD15:%.*]] = fadd double [[TMP25]], [[TMP28]]
15152 // CHECK10-NEXT:    [[TMP29:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !47
15153 // CHECK10-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !47
15154 // CHECK10-NEXT:    [[IDXPROM16:%.*]] = sext i32 [[TMP30]] to i64
15155 // CHECK10-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds double, double* [[TMP29]], i64 [[IDXPROM16]]
15156 // CHECK10-NEXT:    store double [[ADD15]], double* [[ARRAYIDX17]], align 8, !llvm.access.group !47
15157 // CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
15158 // CHECK10:       omp.body.continue:
15159 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
15160 // CHECK10:       omp.inner.for.inc:
15161 // CHECK10-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
15162 // CHECK10-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP31]], 1
15163 // CHECK10-NEXT:    store i32 [[ADD18]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
15164 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP48:![0-9]+]]
15165 // CHECK10:       omp.inner.for.end:
15166 // CHECK10-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
15167 // CHECK10:       omp.dispatch.inc:
15168 // CHECK10-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
15169 // CHECK10-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
15170 // CHECK10-NEXT:    [[ADD19:%.*]] = add nsw i32 [[TMP32]], [[TMP33]]
15171 // CHECK10-NEXT:    store i32 [[ADD19]], i32* [[DOTOMP_LB]], align 4
15172 // CHECK10-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
15173 // CHECK10-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
15174 // CHECK10-NEXT:    [[ADD20:%.*]] = add nsw i32 [[TMP34]], [[TMP35]]
15175 // CHECK10-NEXT:    store i32 [[ADD20]], i32* [[DOTOMP_UB]], align 4
15176 // CHECK10-NEXT:    br label [[OMP_DISPATCH_COND]]
15177 // CHECK10:       omp.dispatch.end:
15178 // CHECK10-NEXT:    [[TMP36:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
15179 // CHECK10-NEXT:    [[TMP37:%.*]] = load i32, i32* [[TMP36]], align 4
15180 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP37]])
15181 // CHECK10-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
15182 // CHECK10-NEXT:    [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
15183 // CHECK10-NEXT:    br i1 [[TMP39]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
15184 // CHECK10:       .omp.final.then:
15185 // CHECK10-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15186 // CHECK10-NEXT:    [[SUB21:%.*]] = sub nsw i32 [[TMP40]], 0
15187 // CHECK10-NEXT:    [[DIV22:%.*]] = sdiv i32 [[SUB21]], 1
15188 // CHECK10-NEXT:    [[MUL23:%.*]] = mul nsw i32 [[DIV22]], 1
15189 // CHECK10-NEXT:    [[ADD24:%.*]] = add nsw i32 0, [[MUL23]]
15190 // CHECK10-NEXT:    store i32 [[ADD24]], i32* [[I6]], align 4
15191 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
15192 // CHECK10:       .omp.final.done:
15193 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
15194 // CHECK10:       omp.precond.end:
15195 // CHECK10-NEXT:    ret void
15196 //
15197 //
15198 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l535
15199 // CHECK10-SAME: (i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1]] {
15200 // CHECK10-NEXT:  entry:
15201 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
15202 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
15203 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
15204 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
15205 // CHECK10-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
15206 // CHECK10-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
15207 // CHECK10-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
15208 // CHECK10-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
15209 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
15210 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32* [[CONV]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
15211 // CHECK10-NEXT:    ret void
15212 //
15213 //
15214 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..18
15215 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
15216 // CHECK10-NEXT:  entry:
15217 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
15218 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
15219 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
15220 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
15221 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
15222 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
15223 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
15224 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
15225 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
15226 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
15227 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
15228 // CHECK10-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
15229 // CHECK10-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
15230 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
15231 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
15232 // CHECK10-NEXT:    [[I3:%.*]] = alloca i32, align 4
15233 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
15234 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
15235 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
15236 // CHECK10-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
15237 // CHECK10-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
15238 // CHECK10-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
15239 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
15240 // CHECK10-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
15241 // CHECK10-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
15242 // CHECK10-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
15243 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
15244 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
15245 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15246 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
15247 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
15248 // CHECK10-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
15249 // CHECK10-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
15250 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
15251 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15252 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
15253 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
15254 // CHECK10:       omp.precond.then:
15255 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
15256 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15257 // CHECK10-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
15258 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
15259 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
15260 // CHECK10-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
15261 // CHECK10-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
15262 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
15263 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
15264 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15265 // CHECK10-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
15266 // CHECK10-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
15267 // CHECK10:       cond.true:
15268 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15269 // CHECK10-NEXT:    br label [[COND_END:%.*]]
15270 // CHECK10:       cond.false:
15271 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
15272 // CHECK10-NEXT:    br label [[COND_END]]
15273 // CHECK10:       cond.end:
15274 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
15275 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
15276 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
15277 // CHECK10-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
15278 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
15279 // CHECK10:       omp.inner.for.cond:
15280 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
15281 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !50
15282 // CHECK10-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
15283 // CHECK10-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15284 // CHECK10:       omp.inner.for.body:
15285 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !50
15286 // CHECK10-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
15287 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !50
15288 // CHECK10-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
15289 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !50
15290 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
15291 // CHECK10:       omp.inner.for.inc:
15292 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
15293 // CHECK10-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !50
15294 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
15295 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
15296 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP51:![0-9]+]]
15297 // CHECK10:       omp.inner.for.end:
15298 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
15299 // CHECK10:       omp.loop.exit:
15300 // CHECK10-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
15301 // CHECK10-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
15302 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
15303 // CHECK10-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
15304 // CHECK10-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
15305 // CHECK10-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
15306 // CHECK10:       .omp.final.then:
15307 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15308 // CHECK10-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
15309 // CHECK10-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
15310 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
15311 // CHECK10-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
15312 // CHECK10-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
15313 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
15314 // CHECK10:       .omp.final.done:
15315 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
15316 // CHECK10:       omp.precond.end:
15317 // CHECK10-NEXT:    ret void
15318 //
15319 //
15320 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..19
15321 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
15322 // CHECK10-NEXT:  entry:
15323 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
15324 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
15325 // CHECK10-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
15326 // CHECK10-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
15327 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
15328 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
15329 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
15330 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
15331 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
15332 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
15333 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
15334 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
15335 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
15336 // CHECK10-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
15337 // CHECK10-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
15338 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
15339 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
15340 // CHECK10-NEXT:    [[I4:%.*]] = alloca i32, align 4
15341 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
15342 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
15343 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
15344 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
15345 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
15346 // CHECK10-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
15347 // CHECK10-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
15348 // CHECK10-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
15349 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
15350 // CHECK10-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
15351 // CHECK10-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
15352 // CHECK10-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
15353 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
15354 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
15355 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15356 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
15357 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
15358 // CHECK10-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
15359 // CHECK10-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
15360 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
15361 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15362 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
15363 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
15364 // CHECK10:       omp.precond.then:
15365 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
15366 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15367 // CHECK10-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
15368 // CHECK10-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
15369 // CHECK10-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
15370 // CHECK10-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
15371 // CHECK10-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
15372 // CHECK10-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
15373 // CHECK10-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
15374 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
15375 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
15376 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
15377 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
15378 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
15379 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
15380 // CHECK10-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32 35, i32 [[TMP10]], i32 [[TMP11]], i32 1, i32 1)
15381 // CHECK10-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
15382 // CHECK10:       omp.dispatch.cond:
15383 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
15384 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4
15385 // CHECK10-NEXT:    [[TMP16:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP15]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
15386 // CHECK10-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP16]], 0
15387 // CHECK10-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
15388 // CHECK10:       omp.dispatch.body:
15389 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
15390 // CHECK10-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
15391 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
15392 // CHECK10:       omp.inner.for.cond:
15393 // CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
15394 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !53
15395 // CHECK10-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
15396 // CHECK10-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15397 // CHECK10:       omp.inner.for.body:
15398 // CHECK10-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
15399 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP20]], 1
15400 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
15401 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !53
15402 // CHECK10-NEXT:    [[TMP21:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !53
15403 // CHECK10-NEXT:    [[TMP22:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !53
15404 // CHECK10-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP22]] to i64
15405 // CHECK10-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP21]], i64 [[IDXPROM]]
15406 // CHECK10-NEXT:    [[TMP23:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !53
15407 // CHECK10-NEXT:    [[TMP24:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !53
15408 // CHECK10-NEXT:    [[TMP25:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !53
15409 // CHECK10-NEXT:    [[IDXPROM6:%.*]] = sext i32 [[TMP25]] to i64
15410 // CHECK10-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds double, double* [[TMP24]], i64 [[IDXPROM6]]
15411 // CHECK10-NEXT:    [[TMP26:%.*]] = load double, double* [[ARRAYIDX7]], align 8, !llvm.access.group !53
15412 // CHECK10-NEXT:    [[ADD8:%.*]] = fadd double [[TMP23]], [[TMP26]]
15413 // CHECK10-NEXT:    [[TMP27:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !53
15414 // CHECK10-NEXT:    [[TMP28:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !53
15415 // CHECK10-NEXT:    [[IDXPROM9:%.*]] = sext i32 [[TMP28]] to i64
15416 // CHECK10-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds double, double* [[TMP27]], i64 [[IDXPROM9]]
15417 // CHECK10-NEXT:    store double [[ADD8]], double* [[ARRAYIDX10]], align 8, !llvm.access.group !53
15418 // CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
15419 // CHECK10:       omp.body.continue:
15420 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
15421 // CHECK10:       omp.inner.for.inc:
15422 // CHECK10-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
15423 // CHECK10-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP29]], 1
15424 // CHECK10-NEXT:    store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
15425 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP54:![0-9]+]]
15426 // CHECK10:       omp.inner.for.end:
15427 // CHECK10-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
15428 // CHECK10:       omp.dispatch.inc:
15429 // CHECK10-NEXT:    br label [[OMP_DISPATCH_COND]]
15430 // CHECK10:       omp.dispatch.end:
15431 // CHECK10-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
15432 // CHECK10-NEXT:    [[TMP31:%.*]] = icmp ne i32 [[TMP30]], 0
15433 // CHECK10-NEXT:    br i1 [[TMP31]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
15434 // CHECK10:       .omp.final.then:
15435 // CHECK10-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15436 // CHECK10-NEXT:    [[SUB12:%.*]] = sub nsw i32 [[TMP32]], 0
15437 // CHECK10-NEXT:    [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1
15438 // CHECK10-NEXT:    [[MUL14:%.*]] = mul nsw i32 [[DIV13]], 1
15439 // CHECK10-NEXT:    [[ADD15:%.*]] = add nsw i32 0, [[MUL14]]
15440 // CHECK10-NEXT:    store i32 [[ADD15]], i32* [[I4]], align 4
15441 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
15442 // CHECK10:       .omp.final.done:
15443 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
15444 // CHECK10:       omp.precond.end:
15445 // CHECK10-NEXT:    ret void
15446 //
15447 //
15448 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l561
15449 // CHECK10-SAME: (i64 [[CH:%.*]], i64 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1]] {
15450 // CHECK10-NEXT:  entry:
15451 // CHECK10-NEXT:    [[CH_ADDR:%.*]] = alloca i64, align 8
15452 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
15453 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
15454 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
15455 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
15456 // CHECK10-NEXT:    store i64 [[CH]], i64* [[CH_ADDR]], align 8
15457 // CHECK10-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
15458 // CHECK10-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
15459 // CHECK10-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
15460 // CHECK10-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
15461 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
15462 // CHECK10-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
15463 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
15464 // CHECK10-NEXT:    ret void
15465 //
15466 //
15467 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..22
15468 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
15469 // CHECK10-NEXT:  entry:
15470 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
15471 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
15472 // CHECK10-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 8
15473 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
15474 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
15475 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
15476 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
15477 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
15478 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
15479 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
15480 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
15481 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
15482 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
15483 // CHECK10-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
15484 // CHECK10-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
15485 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
15486 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
15487 // CHECK10-NEXT:    [[I4:%.*]] = alloca i32, align 4
15488 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
15489 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
15490 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
15491 // CHECK10-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 8
15492 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
15493 // CHECK10-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
15494 // CHECK10-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
15495 // CHECK10-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
15496 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
15497 // CHECK10-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
15498 // CHECK10-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 8
15499 // CHECK10-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 8
15500 // CHECK10-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 8
15501 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
15502 // CHECK10-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
15503 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
15504 // CHECK10-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
15505 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15506 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
15507 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
15508 // CHECK10-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
15509 // CHECK10-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
15510 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
15511 // CHECK10-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15512 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
15513 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
15514 // CHECK10:       omp.precond.then:
15515 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
15516 // CHECK10-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
15517 // CHECK10-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
15518 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
15519 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
15520 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
15521 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
15522 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
15523 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
15524 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
15525 // CHECK10-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
15526 // CHECK10-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
15527 // CHECK10:       cond.true:
15528 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
15529 // CHECK10-NEXT:    br label [[COND_END:%.*]]
15530 // CHECK10:       cond.false:
15531 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
15532 // CHECK10-NEXT:    br label [[COND_END]]
15533 // CHECK10:       cond.end:
15534 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
15535 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
15536 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
15537 // CHECK10-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
15538 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
15539 // CHECK10:       omp.inner.for.cond:
15540 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !56
15541 // CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !56
15542 // CHECK10-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
15543 // CHECK10-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15544 // CHECK10:       omp.inner.for.body:
15545 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !56
15546 // CHECK10-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
15547 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !56
15548 // CHECK10-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
15549 // CHECK10-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !56
15550 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
15551 // CHECK10-NEXT:    store i32 [[TMP23]], i32* [[CONV]], align 4, !llvm.access.group !56
15552 // CHECK10-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !56
15553 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**, i64)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]], i64 [[TMP24]]), !llvm.access.group !56
15554 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
15555 // CHECK10:       omp.inner.for.inc:
15556 // CHECK10-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !56
15557 // CHECK10-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !56
15558 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
15559 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !56
15560 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP57:![0-9]+]]
15561 // CHECK10:       omp.inner.for.end:
15562 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
15563 // CHECK10:       omp.loop.exit:
15564 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
15565 // CHECK10-NEXT:    [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
15566 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP28]])
15567 // CHECK10-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
15568 // CHECK10-NEXT:    [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
15569 // CHECK10-NEXT:    br i1 [[TMP30]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
15570 // CHECK10:       .omp.final.then:
15571 // CHECK10-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15572 // CHECK10-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP31]], 0
15573 // CHECK10-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
15574 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
15575 // CHECK10-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
15576 // CHECK10-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
15577 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
15578 // CHECK10:       .omp.final.done:
15579 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
15580 // CHECK10:       omp.precond.end:
15581 // CHECK10-NEXT:    ret void
15582 //
15583 //
15584 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..23
15585 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 8 dereferenceable(8) [[A:%.*]], double** nonnull align 8 dereferenceable(8) [[B:%.*]], double** nonnull align 8 dereferenceable(8) [[C:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
15586 // CHECK10-NEXT:  entry:
15587 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
15588 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
15589 // CHECK10-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
15590 // CHECK10-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
15591 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
15592 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
15593 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
15594 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
15595 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
15596 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
15597 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
15598 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
15599 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
15600 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
15601 // CHECK10-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
15602 // CHECK10-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
15603 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
15604 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
15605 // CHECK10-NEXT:    [[I6:%.*]] = alloca i32, align 4
15606 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
15607 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
15608 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
15609 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
15610 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
15611 // CHECK10-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
15612 // CHECK10-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
15613 // CHECK10-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
15614 // CHECK10-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
15615 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
15616 // CHECK10-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
15617 // CHECK10-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
15618 // CHECK10-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
15619 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
15620 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
15621 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
15622 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15623 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
15624 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
15625 // CHECK10-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
15626 // CHECK10-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
15627 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
15628 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15629 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
15630 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
15631 // CHECK10:       omp.precond.then:
15632 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
15633 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
15634 // CHECK10-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
15635 // CHECK10-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
15636 // CHECK10-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
15637 // CHECK10-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
15638 // CHECK10-NEXT:    [[CONV5:%.*]] = trunc i64 [[TMP9]] to i32
15639 // CHECK10-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_LB]], align 4
15640 // CHECK10-NEXT:    store i32 [[CONV5]], i32* [[DOTOMP_UB]], align 4
15641 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
15642 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
15643 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32, i32* [[CONV]], align 8
15644 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
15645 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
15646 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
15647 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
15648 // CHECK10-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32 35, i32 [[TMP11]], i32 [[TMP12]], i32 1, i32 [[TMP10]])
15649 // CHECK10-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
15650 // CHECK10:       omp.dispatch.cond:
15651 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
15652 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4
15653 // CHECK10-NEXT:    [[TMP17:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP16]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
15654 // CHECK10-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP17]], 0
15655 // CHECK10-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
15656 // CHECK10:       omp.dispatch.body:
15657 // CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
15658 // CHECK10-NEXT:    store i32 [[TMP18]], i32* [[DOTOMP_IV]], align 4
15659 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
15660 // CHECK10:       omp.inner.for.cond:
15661 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59
15662 // CHECK10-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !59
15663 // CHECK10-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
15664 // CHECK10-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15665 // CHECK10:       omp.inner.for.body:
15666 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59
15667 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP21]], 1
15668 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
15669 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[I6]], align 4, !llvm.access.group !59
15670 // CHECK10-NEXT:    [[TMP22:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !59
15671 // CHECK10-NEXT:    [[TMP23:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !59
15672 // CHECK10-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP23]] to i64
15673 // CHECK10-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP22]], i64 [[IDXPROM]]
15674 // CHECK10-NEXT:    [[TMP24:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !59
15675 // CHECK10-NEXT:    [[TMP25:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !59
15676 // CHECK10-NEXT:    [[TMP26:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !59
15677 // CHECK10-NEXT:    [[IDXPROM8:%.*]] = sext i32 [[TMP26]] to i64
15678 // CHECK10-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds double, double* [[TMP25]], i64 [[IDXPROM8]]
15679 // CHECK10-NEXT:    [[TMP27:%.*]] = load double, double* [[ARRAYIDX9]], align 8, !llvm.access.group !59
15680 // CHECK10-NEXT:    [[ADD10:%.*]] = fadd double [[TMP24]], [[TMP27]]
15681 // CHECK10-NEXT:    [[TMP28:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !59
15682 // CHECK10-NEXT:    [[TMP29:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !59
15683 // CHECK10-NEXT:    [[IDXPROM11:%.*]] = sext i32 [[TMP29]] to i64
15684 // CHECK10-NEXT:    [[ARRAYIDX12:%.*]] = getelementptr inbounds double, double* [[TMP28]], i64 [[IDXPROM11]]
15685 // CHECK10-NEXT:    store double [[ADD10]], double* [[ARRAYIDX12]], align 8, !llvm.access.group !59
15686 // CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
15687 // CHECK10:       omp.body.continue:
15688 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
15689 // CHECK10:       omp.inner.for.inc:
15690 // CHECK10-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59
15691 // CHECK10-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP30]], 1
15692 // CHECK10-NEXT:    store i32 [[ADD13]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59
15693 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP60:![0-9]+]]
15694 // CHECK10:       omp.inner.for.end:
15695 // CHECK10-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
15696 // CHECK10:       omp.dispatch.inc:
15697 // CHECK10-NEXT:    br label [[OMP_DISPATCH_COND]]
15698 // CHECK10:       omp.dispatch.end:
15699 // CHECK10-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
15700 // CHECK10-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
15701 // CHECK10-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
15702 // CHECK10:       .omp.final.then:
15703 // CHECK10-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15704 // CHECK10-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP33]], 0
15705 // CHECK10-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
15706 // CHECK10-NEXT:    [[MUL16:%.*]] = mul nsw i32 [[DIV15]], 1
15707 // CHECK10-NEXT:    [[ADD17:%.*]] = add nsw i32 0, [[MUL16]]
15708 // CHECK10-NEXT:    store i32 [[ADD17]], i32* [[I6]], align 4
15709 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
15710 // CHECK10:       .omp.final.done:
15711 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
15712 // CHECK10:       omp.precond.end:
15713 // CHECK10-NEXT:    ret void
15714 //
15715 //
15716 // CHECK10-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
15717 // CHECK10-SAME: () #[[ATTR3:[0-9]+]] comdat {
15718 // CHECK10-NEXT:  entry:
15719 // CHECK10-NEXT:    [[A:%.*]] = alloca i32*, align 8
15720 // CHECK10-NEXT:    [[B:%.*]] = alloca i32*, align 8
15721 // CHECK10-NEXT:    [[C:%.*]] = alloca i32*, align 8
15722 // CHECK10-NEXT:    [[N:%.*]] = alloca i32, align 4
15723 // CHECK10-NEXT:    [[CH:%.*]] = alloca i32, align 4
15724 // CHECK10-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
15725 // CHECK10-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 8
15726 // CHECK10-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 8
15727 // CHECK10-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 8
15728 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
15729 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
15730 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
15731 // CHECK10-NEXT:    [[N_CASTED3:%.*]] = alloca i64, align 8
15732 // CHECK10-NEXT:    [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [4 x i8*], align 8
15733 // CHECK10-NEXT:    [[DOTOFFLOAD_PTRS6:%.*]] = alloca [4 x i8*], align 8
15734 // CHECK10-NEXT:    [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [4 x i8*], align 8
15735 // CHECK10-NEXT:    [[_TMP8:%.*]] = alloca i32, align 4
15736 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4
15737 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4
15738 // CHECK10-NEXT:    [[CH_CASTED:%.*]] = alloca i64, align 8
15739 // CHECK10-NEXT:    [[N_CASTED18:%.*]] = alloca i64, align 8
15740 // CHECK10-NEXT:    [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [5 x i8*], align 8
15741 // CHECK10-NEXT:    [[DOTOFFLOAD_PTRS21:%.*]] = alloca [5 x i8*], align 8
15742 // CHECK10-NEXT:    [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [5 x i8*], align 8
15743 // CHECK10-NEXT:    [[_TMP23:%.*]] = alloca i32, align 4
15744 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4
15745 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4
15746 // CHECK10-NEXT:    [[N_CASTED32:%.*]] = alloca i64, align 8
15747 // CHECK10-NEXT:    [[DOTOFFLOAD_BASEPTRS34:%.*]] = alloca [4 x i8*], align 8
15748 // CHECK10-NEXT:    [[DOTOFFLOAD_PTRS35:%.*]] = alloca [4 x i8*], align 8
15749 // CHECK10-NEXT:    [[DOTOFFLOAD_MAPPERS36:%.*]] = alloca [4 x i8*], align 8
15750 // CHECK10-NEXT:    [[_TMP37:%.*]] = alloca i32, align 4
15751 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_38:%.*]] = alloca i32, align 4
15752 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4
15753 // CHECK10-NEXT:    [[CH_CASTED46:%.*]] = alloca i64, align 8
15754 // CHECK10-NEXT:    [[N_CASTED48:%.*]] = alloca i64, align 8
15755 // CHECK10-NEXT:    [[DOTOFFLOAD_BASEPTRS50:%.*]] = alloca [5 x i8*], align 8
15756 // CHECK10-NEXT:    [[DOTOFFLOAD_PTRS51:%.*]] = alloca [5 x i8*], align 8
15757 // CHECK10-NEXT:    [[DOTOFFLOAD_MAPPERS52:%.*]] = alloca [5 x i8*], align 8
15758 // CHECK10-NEXT:    [[_TMP53:%.*]] = alloca i32, align 4
15759 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_54:%.*]] = alloca i32, align 4
15760 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_55:%.*]] = alloca i32, align 4
15761 // CHECK10-NEXT:    [[N_CASTED62:%.*]] = alloca i64, align 8
15762 // CHECK10-NEXT:    [[DOTOFFLOAD_BASEPTRS64:%.*]] = alloca [4 x i8*], align 8
15763 // CHECK10-NEXT:    [[DOTOFFLOAD_PTRS65:%.*]] = alloca [4 x i8*], align 8
15764 // CHECK10-NEXT:    [[DOTOFFLOAD_MAPPERS66:%.*]] = alloca [4 x i8*], align 8
15765 // CHECK10-NEXT:    [[_TMP67:%.*]] = alloca i32, align 4
15766 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_68:%.*]] = alloca i32, align 4
15767 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_69:%.*]] = alloca i32, align 4
15768 // CHECK10-NEXT:    [[CH_CASTED76:%.*]] = alloca i64, align 8
15769 // CHECK10-NEXT:    [[N_CASTED78:%.*]] = alloca i64, align 8
15770 // CHECK10-NEXT:    [[DOTOFFLOAD_BASEPTRS80:%.*]] = alloca [5 x i8*], align 8
15771 // CHECK10-NEXT:    [[DOTOFFLOAD_PTRS81:%.*]] = alloca [5 x i8*], align 8
15772 // CHECK10-NEXT:    [[DOTOFFLOAD_MAPPERS82:%.*]] = alloca [5 x i8*], align 8
15773 // CHECK10-NEXT:    [[_TMP83:%.*]] = alloca i32, align 4
15774 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_84:%.*]] = alloca i32, align 4
15775 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_85:%.*]] = alloca i32, align 4
15776 // CHECK10-NEXT:    store i32 10000, i32* [[N]], align 4
15777 // CHECK10-NEXT:    store i32 100, i32* [[CH]], align 4
15778 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
15779 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_CASTED]] to i32*
15780 // CHECK10-NEXT:    store i32 [[TMP0]], i32* [[CONV]], align 4
15781 // CHECK10-NEXT:    [[TMP1:%.*]] = load i64, i64* [[N_CASTED]], align 8
15782 // CHECK10-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[A]], align 8
15783 // CHECK10-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[B]], align 8
15784 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[C]], align 8
15785 // CHECK10-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
15786 // CHECK10-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i64*
15787 // CHECK10-NEXT:    store i64 [[TMP1]], i64* [[TMP6]], align 8
15788 // CHECK10-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
15789 // CHECK10-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64*
15790 // CHECK10-NEXT:    store i64 [[TMP1]], i64* [[TMP8]], align 8
15791 // CHECK10-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
15792 // CHECK10-NEXT:    store i8* null, i8** [[TMP9]], align 8
15793 // CHECK10-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
15794 // CHECK10-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i32**
15795 // CHECK10-NEXT:    store i32* [[TMP2]], i32** [[TMP11]], align 8
15796 // CHECK10-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
15797 // CHECK10-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32**
15798 // CHECK10-NEXT:    store i32* [[TMP2]], i32** [[TMP13]], align 8
15799 // CHECK10-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
15800 // CHECK10-NEXT:    store i8* null, i8** [[TMP14]], align 8
15801 // CHECK10-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
15802 // CHECK10-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32**
15803 // CHECK10-NEXT:    store i32* [[TMP3]], i32** [[TMP16]], align 8
15804 // CHECK10-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
15805 // CHECK10-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32**
15806 // CHECK10-NEXT:    store i32* [[TMP3]], i32** [[TMP18]], align 8
15807 // CHECK10-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
15808 // CHECK10-NEXT:    store i8* null, i8** [[TMP19]], align 8
15809 // CHECK10-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
15810 // CHECK10-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32**
15811 // CHECK10-NEXT:    store i32* [[TMP4]], i32** [[TMP21]], align 8
15812 // CHECK10-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
15813 // CHECK10-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32**
15814 // CHECK10-NEXT:    store i32* [[TMP4]], i32** [[TMP23]], align 8
15815 // CHECK10-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
15816 // CHECK10-NEXT:    store i8* null, i8** [[TMP24]], align 8
15817 // CHECK10-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
15818 // CHECK10-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
15819 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32, i32* [[N]], align 4
15820 // CHECK10-NEXT:    store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4
15821 // CHECK10-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15822 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0
15823 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
15824 // CHECK10-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
15825 // CHECK10-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
15826 // CHECK10-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15827 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP29]], 1
15828 // CHECK10-NEXT:    [[TMP30:%.*]] = zext i32 [[ADD]] to i64
15829 // CHECK10-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP30]])
15830 // CHECK10-NEXT:    [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l42.region_id, i32 4, i8** [[TMP25]], i8** [[TMP26]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.28, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.29, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
15831 // CHECK10-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
15832 // CHECK10-NEXT:    br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
15833 // CHECK10:       omp_offload.failed:
15834 // CHECK10-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l42(i64 [[TMP1]], i32* [[TMP2]], i32* [[TMP3]], i32* [[TMP4]]) #[[ATTR2]]
15835 // CHECK10-NEXT:    br label [[OMP_OFFLOAD_CONT]]
15836 // CHECK10:       omp_offload.cont:
15837 // CHECK10-NEXT:    [[TMP33:%.*]] = load i32, i32* [[N]], align 4
15838 // CHECK10-NEXT:    [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32*
15839 // CHECK10-NEXT:    store i32 [[TMP33]], i32* [[CONV4]], align 4
15840 // CHECK10-NEXT:    [[TMP34:%.*]] = load i64, i64* [[N_CASTED3]], align 8
15841 // CHECK10-NEXT:    [[TMP35:%.*]] = load i32*, i32** [[A]], align 8
15842 // CHECK10-NEXT:    [[TMP36:%.*]] = load i32*, i32** [[B]], align 8
15843 // CHECK10-NEXT:    [[TMP37:%.*]] = load i32*, i32** [[C]], align 8
15844 // CHECK10-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
15845 // CHECK10-NEXT:    [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64*
15846 // CHECK10-NEXT:    store i64 [[TMP34]], i64* [[TMP39]], align 8
15847 // CHECK10-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
15848 // CHECK10-NEXT:    [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i64*
15849 // CHECK10-NEXT:    store i64 [[TMP34]], i64* [[TMP41]], align 8
15850 // CHECK10-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0
15851 // CHECK10-NEXT:    store i8* null, i8** [[TMP42]], align 8
15852 // CHECK10-NEXT:    [[TMP43:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1
15853 // CHECK10-NEXT:    [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32**
15854 // CHECK10-NEXT:    store i32* [[TMP35]], i32** [[TMP44]], align 8
15855 // CHECK10-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1
15856 // CHECK10-NEXT:    [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32**
15857 // CHECK10-NEXT:    store i32* [[TMP35]], i32** [[TMP46]], align 8
15858 // CHECK10-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1
15859 // CHECK10-NEXT:    store i8* null, i8** [[TMP47]], align 8
15860 // CHECK10-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2
15861 // CHECK10-NEXT:    [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32**
15862 // CHECK10-NEXT:    store i32* [[TMP36]], i32** [[TMP49]], align 8
15863 // CHECK10-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2
15864 // CHECK10-NEXT:    [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32**
15865 // CHECK10-NEXT:    store i32* [[TMP36]], i32** [[TMP51]], align 8
15866 // CHECK10-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2
15867 // CHECK10-NEXT:    store i8* null, i8** [[TMP52]], align 8
15868 // CHECK10-NEXT:    [[TMP53:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 3
15869 // CHECK10-NEXT:    [[TMP54:%.*]] = bitcast i8** [[TMP53]] to i32**
15870 // CHECK10-NEXT:    store i32* [[TMP37]], i32** [[TMP54]], align 8
15871 // CHECK10-NEXT:    [[TMP55:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 3
15872 // CHECK10-NEXT:    [[TMP56:%.*]] = bitcast i8** [[TMP55]] to i32**
15873 // CHECK10-NEXT:    store i32* [[TMP37]], i32** [[TMP56]], align 8
15874 // CHECK10-NEXT:    [[TMP57:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 3
15875 // CHECK10-NEXT:    store i8* null, i8** [[TMP57]], align 8
15876 // CHECK10-NEXT:    [[TMP58:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
15877 // CHECK10-NEXT:    [[TMP59:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
15878 // CHECK10-NEXT:    [[TMP60:%.*]] = load i32, i32* [[N]], align 4
15879 // CHECK10-NEXT:    store i32 [[TMP60]], i32* [[DOTCAPTURE_EXPR_9]], align 4
15880 // CHECK10-NEXT:    [[TMP61:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4
15881 // CHECK10-NEXT:    [[SUB11:%.*]] = sub nsw i32 [[TMP61]], 0
15882 // CHECK10-NEXT:    [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1
15883 // CHECK10-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1
15884 // CHECK10-NEXT:    store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4
15885 // CHECK10-NEXT:    [[TMP62:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4
15886 // CHECK10-NEXT:    [[ADD14:%.*]] = add nsw i32 [[TMP62]], 1
15887 // CHECK10-NEXT:    [[TMP63:%.*]] = zext i32 [[ADD14]] to i64
15888 // CHECK10-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP63]])
15889 // CHECK10-NEXT:    [[TMP64:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l50.region_id, i32 4, i8** [[TMP58]], i8** [[TMP59]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.32, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.33, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
15890 // CHECK10-NEXT:    [[TMP65:%.*]] = icmp ne i32 [[TMP64]], 0
15891 // CHECK10-NEXT:    br i1 [[TMP65]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]]
15892 // CHECK10:       omp_offload.failed15:
15893 // CHECK10-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l50(i64 [[TMP34]], i32* [[TMP35]], i32* [[TMP36]], i32* [[TMP37]]) #[[ATTR2]]
15894 // CHECK10-NEXT:    br label [[OMP_OFFLOAD_CONT16]]
15895 // CHECK10:       omp_offload.cont16:
15896 // CHECK10-NEXT:    [[TMP66:%.*]] = load i32, i32* [[CH]], align 4
15897 // CHECK10-NEXT:    [[CONV17:%.*]] = bitcast i64* [[CH_CASTED]] to i32*
15898 // CHECK10-NEXT:    store i32 [[TMP66]], i32* [[CONV17]], align 4
15899 // CHECK10-NEXT:    [[TMP67:%.*]] = load i64, i64* [[CH_CASTED]], align 8
15900 // CHECK10-NEXT:    [[TMP68:%.*]] = load i32, i32* [[N]], align 4
15901 // CHECK10-NEXT:    [[CONV19:%.*]] = bitcast i64* [[N_CASTED18]] to i32*
15902 // CHECK10-NEXT:    store i32 [[TMP68]], i32* [[CONV19]], align 4
15903 // CHECK10-NEXT:    [[TMP69:%.*]] = load i64, i64* [[N_CASTED18]], align 8
15904 // CHECK10-NEXT:    [[TMP70:%.*]] = load i32*, i32** [[A]], align 8
15905 // CHECK10-NEXT:    [[TMP71:%.*]] = load i32*, i32** [[B]], align 8
15906 // CHECK10-NEXT:    [[TMP72:%.*]] = load i32*, i32** [[C]], align 8
15907 // CHECK10-NEXT:    [[TMP73:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0
15908 // CHECK10-NEXT:    [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i64*
15909 // CHECK10-NEXT:    store i64 [[TMP67]], i64* [[TMP74]], align 8
15910 // CHECK10-NEXT:    [[TMP75:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0
15911 // CHECK10-NEXT:    [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i64*
15912 // CHECK10-NEXT:    store i64 [[TMP67]], i64* [[TMP76]], align 8
15913 // CHECK10-NEXT:    [[TMP77:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0
15914 // CHECK10-NEXT:    store i8* null, i8** [[TMP77]], align 8
15915 // CHECK10-NEXT:    [[TMP78:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1
15916 // CHECK10-NEXT:    [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i64*
15917 // CHECK10-NEXT:    store i64 [[TMP69]], i64* [[TMP79]], align 8
15918 // CHECK10-NEXT:    [[TMP80:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1
15919 // CHECK10-NEXT:    [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i64*
15920 // CHECK10-NEXT:    store i64 [[TMP69]], i64* [[TMP81]], align 8
15921 // CHECK10-NEXT:    [[TMP82:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1
15922 // CHECK10-NEXT:    store i8* null, i8** [[TMP82]], align 8
15923 // CHECK10-NEXT:    [[TMP83:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2
15924 // CHECK10-NEXT:    [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32**
15925 // CHECK10-NEXT:    store i32* [[TMP70]], i32** [[TMP84]], align 8
15926 // CHECK10-NEXT:    [[TMP85:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2
15927 // CHECK10-NEXT:    [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i32**
15928 // CHECK10-NEXT:    store i32* [[TMP70]], i32** [[TMP86]], align 8
15929 // CHECK10-NEXT:    [[TMP87:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2
15930 // CHECK10-NEXT:    store i8* null, i8** [[TMP87]], align 8
15931 // CHECK10-NEXT:    [[TMP88:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3
15932 // CHECK10-NEXT:    [[TMP89:%.*]] = bitcast i8** [[TMP88]] to i32**
15933 // CHECK10-NEXT:    store i32* [[TMP71]], i32** [[TMP89]], align 8
15934 // CHECK10-NEXT:    [[TMP90:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3
15935 // CHECK10-NEXT:    [[TMP91:%.*]] = bitcast i8** [[TMP90]] to i32**
15936 // CHECK10-NEXT:    store i32* [[TMP71]], i32** [[TMP91]], align 8
15937 // CHECK10-NEXT:    [[TMP92:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 3
15938 // CHECK10-NEXT:    store i8* null, i8** [[TMP92]], align 8
15939 // CHECK10-NEXT:    [[TMP93:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 4
15940 // CHECK10-NEXT:    [[TMP94:%.*]] = bitcast i8** [[TMP93]] to i32**
15941 // CHECK10-NEXT:    store i32* [[TMP72]], i32** [[TMP94]], align 8
15942 // CHECK10-NEXT:    [[TMP95:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 4
15943 // CHECK10-NEXT:    [[TMP96:%.*]] = bitcast i8** [[TMP95]] to i32**
15944 // CHECK10-NEXT:    store i32* [[TMP72]], i32** [[TMP96]], align 8
15945 // CHECK10-NEXT:    [[TMP97:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 4
15946 // CHECK10-NEXT:    store i8* null, i8** [[TMP97]], align 8
15947 // CHECK10-NEXT:    [[TMP98:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0
15948 // CHECK10-NEXT:    [[TMP99:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0
15949 // CHECK10-NEXT:    [[TMP100:%.*]] = load i32, i32* [[N]], align 4
15950 // CHECK10-NEXT:    store i32 [[TMP100]], i32* [[DOTCAPTURE_EXPR_24]], align 4
15951 // CHECK10-NEXT:    [[TMP101:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4
15952 // CHECK10-NEXT:    [[SUB26:%.*]] = sub nsw i32 [[TMP101]], 0
15953 // CHECK10-NEXT:    [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1
15954 // CHECK10-NEXT:    [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1
15955 // CHECK10-NEXT:    store i32 [[SUB28]], i32* [[DOTCAPTURE_EXPR_25]], align 4
15956 // CHECK10-NEXT:    [[TMP102:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4
15957 // CHECK10-NEXT:    [[ADD29:%.*]] = add nsw i32 [[TMP102]], 1
15958 // CHECK10-NEXT:    [[TMP103:%.*]] = zext i32 [[ADD29]] to i64
15959 // CHECK10-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP103]])
15960 // CHECK10-NEXT:    [[TMP104:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l58.region_id, i32 5, i8** [[TMP98]], i8** [[TMP99]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.36, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.37, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
15961 // CHECK10-NEXT:    [[TMP105:%.*]] = icmp ne i32 [[TMP104]], 0
15962 // CHECK10-NEXT:    br i1 [[TMP105]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]]
15963 // CHECK10:       omp_offload.failed30:
15964 // CHECK10-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l58(i64 [[TMP67]], i64 [[TMP69]], i32* [[TMP70]], i32* [[TMP71]], i32* [[TMP72]]) #[[ATTR2]]
15965 // CHECK10-NEXT:    br label [[OMP_OFFLOAD_CONT31]]
15966 // CHECK10:       omp_offload.cont31:
15967 // CHECK10-NEXT:    [[TMP106:%.*]] = load i32, i32* [[N]], align 4
15968 // CHECK10-NEXT:    [[CONV33:%.*]] = bitcast i64* [[N_CASTED32]] to i32*
15969 // CHECK10-NEXT:    store i32 [[TMP106]], i32* [[CONV33]], align 4
15970 // CHECK10-NEXT:    [[TMP107:%.*]] = load i64, i64* [[N_CASTED32]], align 8
15971 // CHECK10-NEXT:    [[TMP108:%.*]] = load i32*, i32** [[A]], align 8
15972 // CHECK10-NEXT:    [[TMP109:%.*]] = load i32*, i32** [[B]], align 8
15973 // CHECK10-NEXT:    [[TMP110:%.*]] = load i32*, i32** [[C]], align 8
15974 // CHECK10-NEXT:    [[TMP111:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0
15975 // CHECK10-NEXT:    [[TMP112:%.*]] = bitcast i8** [[TMP111]] to i64*
15976 // CHECK10-NEXT:    store i64 [[TMP107]], i64* [[TMP112]], align 8
15977 // CHECK10-NEXT:    [[TMP113:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 0
15978 // CHECK10-NEXT:    [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i64*
15979 // CHECK10-NEXT:    store i64 [[TMP107]], i64* [[TMP114]], align 8
15980 // CHECK10-NEXT:    [[TMP115:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 0
15981 // CHECK10-NEXT:    store i8* null, i8** [[TMP115]], align 8
15982 // CHECK10-NEXT:    [[TMP116:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 1
15983 // CHECK10-NEXT:    [[TMP117:%.*]] = bitcast i8** [[TMP116]] to i32**
15984 // CHECK10-NEXT:    store i32* [[TMP108]], i32** [[TMP117]], align 8
15985 // CHECK10-NEXT:    [[TMP118:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 1
15986 // CHECK10-NEXT:    [[TMP119:%.*]] = bitcast i8** [[TMP118]] to i32**
15987 // CHECK10-NEXT:    store i32* [[TMP108]], i32** [[TMP119]], align 8
15988 // CHECK10-NEXT:    [[TMP120:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 1
15989 // CHECK10-NEXT:    store i8* null, i8** [[TMP120]], align 8
15990 // CHECK10-NEXT:    [[TMP121:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 2
15991 // CHECK10-NEXT:    [[TMP122:%.*]] = bitcast i8** [[TMP121]] to i32**
15992 // CHECK10-NEXT:    store i32* [[TMP109]], i32** [[TMP122]], align 8
15993 // CHECK10-NEXT:    [[TMP123:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 2
15994 // CHECK10-NEXT:    [[TMP124:%.*]] = bitcast i8** [[TMP123]] to i32**
15995 // CHECK10-NEXT:    store i32* [[TMP109]], i32** [[TMP124]], align 8
15996 // CHECK10-NEXT:    [[TMP125:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 2
15997 // CHECK10-NEXT:    store i8* null, i8** [[TMP125]], align 8
15998 // CHECK10-NEXT:    [[TMP126:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 3
15999 // CHECK10-NEXT:    [[TMP127:%.*]] = bitcast i8** [[TMP126]] to i32**
16000 // CHECK10-NEXT:    store i32* [[TMP110]], i32** [[TMP127]], align 8
16001 // CHECK10-NEXT:    [[TMP128:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 3
16002 // CHECK10-NEXT:    [[TMP129:%.*]] = bitcast i8** [[TMP128]] to i32**
16003 // CHECK10-NEXT:    store i32* [[TMP110]], i32** [[TMP129]], align 8
16004 // CHECK10-NEXT:    [[TMP130:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 3
16005 // CHECK10-NEXT:    store i8* null, i8** [[TMP130]], align 8
16006 // CHECK10-NEXT:    [[TMP131:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0
16007 // CHECK10-NEXT:    [[TMP132:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 0
16008 // CHECK10-NEXT:    [[TMP133:%.*]] = load i32, i32* [[N]], align 4
16009 // CHECK10-NEXT:    store i32 [[TMP133]], i32* [[DOTCAPTURE_EXPR_38]], align 4
16010 // CHECK10-NEXT:    [[TMP134:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_38]], align 4
16011 // CHECK10-NEXT:    [[SUB40:%.*]] = sub nsw i32 [[TMP134]], 0
16012 // CHECK10-NEXT:    [[DIV41:%.*]] = sdiv i32 [[SUB40]], 1
16013 // CHECK10-NEXT:    [[SUB42:%.*]] = sub nsw i32 [[DIV41]], 1
16014 // CHECK10-NEXT:    store i32 [[SUB42]], i32* [[DOTCAPTURE_EXPR_39]], align 4
16015 // CHECK10-NEXT:    [[TMP135:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_39]], align 4
16016 // CHECK10-NEXT:    [[ADD43:%.*]] = add nsw i32 [[TMP135]], 1
16017 // CHECK10-NEXT:    [[TMP136:%.*]] = zext i32 [[ADD43]] to i64
16018 // CHECK10-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP136]])
16019 // CHECK10-NEXT:    [[TMP137:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l66.region_id, i32 4, i8** [[TMP131]], i8** [[TMP132]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.40, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.41, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
16020 // CHECK10-NEXT:    [[TMP138:%.*]] = icmp ne i32 [[TMP137]], 0
16021 // CHECK10-NEXT:    br i1 [[TMP138]], label [[OMP_OFFLOAD_FAILED44:%.*]], label [[OMP_OFFLOAD_CONT45:%.*]]
16022 // CHECK10:       omp_offload.failed44:
16023 // CHECK10-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l66(i64 [[TMP107]], i32* [[TMP108]], i32* [[TMP109]], i32* [[TMP110]]) #[[ATTR2]]
16024 // CHECK10-NEXT:    br label [[OMP_OFFLOAD_CONT45]]
16025 // CHECK10:       omp_offload.cont45:
16026 // CHECK10-NEXT:    [[TMP139:%.*]] = load i32, i32* [[CH]], align 4
16027 // CHECK10-NEXT:    [[CONV47:%.*]] = bitcast i64* [[CH_CASTED46]] to i32*
16028 // CHECK10-NEXT:    store i32 [[TMP139]], i32* [[CONV47]], align 4
16029 // CHECK10-NEXT:    [[TMP140:%.*]] = load i64, i64* [[CH_CASTED46]], align 8
16030 // CHECK10-NEXT:    [[TMP141:%.*]] = load i32, i32* [[N]], align 4
16031 // CHECK10-NEXT:    [[CONV49:%.*]] = bitcast i64* [[N_CASTED48]] to i32*
16032 // CHECK10-NEXT:    store i32 [[TMP141]], i32* [[CONV49]], align 4
16033 // CHECK10-NEXT:    [[TMP142:%.*]] = load i64, i64* [[N_CASTED48]], align 8
16034 // CHECK10-NEXT:    [[TMP143:%.*]] = load i32*, i32** [[A]], align 8
16035 // CHECK10-NEXT:    [[TMP144:%.*]] = load i32*, i32** [[B]], align 8
16036 // CHECK10-NEXT:    [[TMP145:%.*]] = load i32*, i32** [[C]], align 8
16037 // CHECK10-NEXT:    [[TMP146:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0
16038 // CHECK10-NEXT:    [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i64*
16039 // CHECK10-NEXT:    store i64 [[TMP140]], i64* [[TMP147]], align 8
16040 // CHECK10-NEXT:    [[TMP148:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 0
16041 // CHECK10-NEXT:    [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i64*
16042 // CHECK10-NEXT:    store i64 [[TMP140]], i64* [[TMP149]], align 8
16043 // CHECK10-NEXT:    [[TMP150:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 0
16044 // CHECK10-NEXT:    store i8* null, i8** [[TMP150]], align 8
16045 // CHECK10-NEXT:    [[TMP151:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 1
16046 // CHECK10-NEXT:    [[TMP152:%.*]] = bitcast i8** [[TMP151]] to i64*
16047 // CHECK10-NEXT:    store i64 [[TMP142]], i64* [[TMP152]], align 8
16048 // CHECK10-NEXT:    [[TMP153:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 1
16049 // CHECK10-NEXT:    [[TMP154:%.*]] = bitcast i8** [[TMP153]] to i64*
16050 // CHECK10-NEXT:    store i64 [[TMP142]], i64* [[TMP154]], align 8
16051 // CHECK10-NEXT:    [[TMP155:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 1
16052 // CHECK10-NEXT:    store i8* null, i8** [[TMP155]], align 8
16053 // CHECK10-NEXT:    [[TMP156:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 2
16054 // CHECK10-NEXT:    [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i32**
16055 // CHECK10-NEXT:    store i32* [[TMP143]], i32** [[TMP157]], align 8
16056 // CHECK10-NEXT:    [[TMP158:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 2
16057 // CHECK10-NEXT:    [[TMP159:%.*]] = bitcast i8** [[TMP158]] to i32**
16058 // CHECK10-NEXT:    store i32* [[TMP143]], i32** [[TMP159]], align 8
16059 // CHECK10-NEXT:    [[TMP160:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 2
16060 // CHECK10-NEXT:    store i8* null, i8** [[TMP160]], align 8
16061 // CHECK10-NEXT:    [[TMP161:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 3
16062 // CHECK10-NEXT:    [[TMP162:%.*]] = bitcast i8** [[TMP161]] to i32**
16063 // CHECK10-NEXT:    store i32* [[TMP144]], i32** [[TMP162]], align 8
16064 // CHECK10-NEXT:    [[TMP163:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 3
16065 // CHECK10-NEXT:    [[TMP164:%.*]] = bitcast i8** [[TMP163]] to i32**
16066 // CHECK10-NEXT:    store i32* [[TMP144]], i32** [[TMP164]], align 8
16067 // CHECK10-NEXT:    [[TMP165:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 3
16068 // CHECK10-NEXT:    store i8* null, i8** [[TMP165]], align 8
16069 // CHECK10-NEXT:    [[TMP166:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 4
16070 // CHECK10-NEXT:    [[TMP167:%.*]] = bitcast i8** [[TMP166]] to i32**
16071 // CHECK10-NEXT:    store i32* [[TMP145]], i32** [[TMP167]], align 8
16072 // CHECK10-NEXT:    [[TMP168:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 4
16073 // CHECK10-NEXT:    [[TMP169:%.*]] = bitcast i8** [[TMP168]] to i32**
16074 // CHECK10-NEXT:    store i32* [[TMP145]], i32** [[TMP169]], align 8
16075 // CHECK10-NEXT:    [[TMP170:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 4
16076 // CHECK10-NEXT:    store i8* null, i8** [[TMP170]], align 8
16077 // CHECK10-NEXT:    [[TMP171:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0
16078 // CHECK10-NEXT:    [[TMP172:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 0
16079 // CHECK10-NEXT:    [[TMP173:%.*]] = load i32, i32* [[N]], align 4
16080 // CHECK10-NEXT:    store i32 [[TMP173]], i32* [[DOTCAPTURE_EXPR_54]], align 4
16081 // CHECK10-NEXT:    [[TMP174:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_54]], align 4
16082 // CHECK10-NEXT:    [[SUB56:%.*]] = sub nsw i32 [[TMP174]], 0
16083 // CHECK10-NEXT:    [[DIV57:%.*]] = sdiv i32 [[SUB56]], 1
16084 // CHECK10-NEXT:    [[SUB58:%.*]] = sub nsw i32 [[DIV57]], 1
16085 // CHECK10-NEXT:    store i32 [[SUB58]], i32* [[DOTCAPTURE_EXPR_55]], align 4
16086 // CHECK10-NEXT:    [[TMP175:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_55]], align 4
16087 // CHECK10-NEXT:    [[ADD59:%.*]] = add nsw i32 [[TMP175]], 1
16088 // CHECK10-NEXT:    [[TMP176:%.*]] = zext i32 [[ADD59]] to i64
16089 // CHECK10-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP176]])
16090 // CHECK10-NEXT:    [[TMP177:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l74.region_id, i32 5, i8** [[TMP171]], i8** [[TMP172]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.44, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.45, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
16091 // CHECK10-NEXT:    [[TMP178:%.*]] = icmp ne i32 [[TMP177]], 0
16092 // CHECK10-NEXT:    br i1 [[TMP178]], label [[OMP_OFFLOAD_FAILED60:%.*]], label [[OMP_OFFLOAD_CONT61:%.*]]
16093 // CHECK10:       omp_offload.failed60:
16094 // CHECK10-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l74(i64 [[TMP140]], i64 [[TMP142]], i32* [[TMP143]], i32* [[TMP144]], i32* [[TMP145]]) #[[ATTR2]]
16095 // CHECK10-NEXT:    br label [[OMP_OFFLOAD_CONT61]]
16096 // CHECK10:       omp_offload.cont61:
16097 // CHECK10-NEXT:    [[TMP179:%.*]] = load i32, i32* [[N]], align 4
16098 // CHECK10-NEXT:    [[CONV63:%.*]] = bitcast i64* [[N_CASTED62]] to i32*
16099 // CHECK10-NEXT:    store i32 [[TMP179]], i32* [[CONV63]], align 4
16100 // CHECK10-NEXT:    [[TMP180:%.*]] = load i64, i64* [[N_CASTED62]], align 8
16101 // CHECK10-NEXT:    [[TMP181:%.*]] = load i32*, i32** [[A]], align 8
16102 // CHECK10-NEXT:    [[TMP182:%.*]] = load i32*, i32** [[B]], align 8
16103 // CHECK10-NEXT:    [[TMP183:%.*]] = load i32*, i32** [[C]], align 8
16104 // CHECK10-NEXT:    [[TMP184:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS64]], i32 0, i32 0
16105 // CHECK10-NEXT:    [[TMP185:%.*]] = bitcast i8** [[TMP184]] to i64*
16106 // CHECK10-NEXT:    store i64 [[TMP180]], i64* [[TMP185]], align 8
16107 // CHECK10-NEXT:    [[TMP186:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS65]], i32 0, i32 0
16108 // CHECK10-NEXT:    [[TMP187:%.*]] = bitcast i8** [[TMP186]] to i64*
16109 // CHECK10-NEXT:    store i64 [[TMP180]], i64* [[TMP187]], align 8
16110 // CHECK10-NEXT:    [[TMP188:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS66]], i64 0, i64 0
16111 // CHECK10-NEXT:    store i8* null, i8** [[TMP188]], align 8
16112 // CHECK10-NEXT:    [[TMP189:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS64]], i32 0, i32 1
16113 // CHECK10-NEXT:    [[TMP190:%.*]] = bitcast i8** [[TMP189]] to i32**
16114 // CHECK10-NEXT:    store i32* [[TMP181]], i32** [[TMP190]], align 8
16115 // CHECK10-NEXT:    [[TMP191:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS65]], i32 0, i32 1
16116 // CHECK10-NEXT:    [[TMP192:%.*]] = bitcast i8** [[TMP191]] to i32**
16117 // CHECK10-NEXT:    store i32* [[TMP181]], i32** [[TMP192]], align 8
16118 // CHECK10-NEXT:    [[TMP193:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS66]], i64 0, i64 1
16119 // CHECK10-NEXT:    store i8* null, i8** [[TMP193]], align 8
16120 // CHECK10-NEXT:    [[TMP194:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS64]], i32 0, i32 2
16121 // CHECK10-NEXT:    [[TMP195:%.*]] = bitcast i8** [[TMP194]] to i32**
16122 // CHECK10-NEXT:    store i32* [[TMP182]], i32** [[TMP195]], align 8
16123 // CHECK10-NEXT:    [[TMP196:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS65]], i32 0, i32 2
16124 // CHECK10-NEXT:    [[TMP197:%.*]] = bitcast i8** [[TMP196]] to i32**
16125 // CHECK10-NEXT:    store i32* [[TMP182]], i32** [[TMP197]], align 8
16126 // CHECK10-NEXT:    [[TMP198:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS66]], i64 0, i64 2
16127 // CHECK10-NEXT:    store i8* null, i8** [[TMP198]], align 8
16128 // CHECK10-NEXT:    [[TMP199:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS64]], i32 0, i32 3
16129 // CHECK10-NEXT:    [[TMP200:%.*]] = bitcast i8** [[TMP199]] to i32**
16130 // CHECK10-NEXT:    store i32* [[TMP183]], i32** [[TMP200]], align 8
16131 // CHECK10-NEXT:    [[TMP201:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS65]], i32 0, i32 3
16132 // CHECK10-NEXT:    [[TMP202:%.*]] = bitcast i8** [[TMP201]] to i32**
16133 // CHECK10-NEXT:    store i32* [[TMP183]], i32** [[TMP202]], align 8
16134 // CHECK10-NEXT:    [[TMP203:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS66]], i64 0, i64 3
16135 // CHECK10-NEXT:    store i8* null, i8** [[TMP203]], align 8
16136 // CHECK10-NEXT:    [[TMP204:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS64]], i32 0, i32 0
16137 // CHECK10-NEXT:    [[TMP205:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS65]], i32 0, i32 0
16138 // CHECK10-NEXT:    [[TMP206:%.*]] = load i32, i32* [[N]], align 4
16139 // CHECK10-NEXT:    store i32 [[TMP206]], i32* [[DOTCAPTURE_EXPR_68]], align 4
16140 // CHECK10-NEXT:    [[TMP207:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_68]], align 4
16141 // CHECK10-NEXT:    [[SUB70:%.*]] = sub nsw i32 [[TMP207]], 0
16142 // CHECK10-NEXT:    [[DIV71:%.*]] = sdiv i32 [[SUB70]], 1
16143 // CHECK10-NEXT:    [[SUB72:%.*]] = sub nsw i32 [[DIV71]], 1
16144 // CHECK10-NEXT:    store i32 [[SUB72]], i32* [[DOTCAPTURE_EXPR_69]], align 4
16145 // CHECK10-NEXT:    [[TMP208:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_69]], align 4
16146 // CHECK10-NEXT:    [[ADD73:%.*]] = add nsw i32 [[TMP208]], 1
16147 // CHECK10-NEXT:    [[TMP209:%.*]] = zext i32 [[ADD73]] to i64
16148 // CHECK10-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP209]])
16149 // CHECK10-NEXT:    [[TMP210:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l82.region_id, i32 4, i8** [[TMP204]], i8** [[TMP205]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.48, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.49, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
16150 // CHECK10-NEXT:    [[TMP211:%.*]] = icmp ne i32 [[TMP210]], 0
16151 // CHECK10-NEXT:    br i1 [[TMP211]], label [[OMP_OFFLOAD_FAILED74:%.*]], label [[OMP_OFFLOAD_CONT75:%.*]]
16152 // CHECK10:       omp_offload.failed74:
16153 // CHECK10-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l82(i64 [[TMP180]], i32* [[TMP181]], i32* [[TMP182]], i32* [[TMP183]]) #[[ATTR2]]
16154 // CHECK10-NEXT:    br label [[OMP_OFFLOAD_CONT75]]
16155 // CHECK10:       omp_offload.cont75:
16156 // CHECK10-NEXT:    [[TMP212:%.*]] = load i32, i32* [[CH]], align 4
16157 // CHECK10-NEXT:    [[CONV77:%.*]] = bitcast i64* [[CH_CASTED76]] to i32*
16158 // CHECK10-NEXT:    store i32 [[TMP212]], i32* [[CONV77]], align 4
16159 // CHECK10-NEXT:    [[TMP213:%.*]] = load i64, i64* [[CH_CASTED76]], align 8
16160 // CHECK10-NEXT:    [[TMP214:%.*]] = load i32, i32* [[N]], align 4
16161 // CHECK10-NEXT:    [[CONV79:%.*]] = bitcast i64* [[N_CASTED78]] to i32*
16162 // CHECK10-NEXT:    store i32 [[TMP214]], i32* [[CONV79]], align 4
16163 // CHECK10-NEXT:    [[TMP215:%.*]] = load i64, i64* [[N_CASTED78]], align 8
16164 // CHECK10-NEXT:    [[TMP216:%.*]] = load i32*, i32** [[A]], align 8
16165 // CHECK10-NEXT:    [[TMP217:%.*]] = load i32*, i32** [[B]], align 8
16166 // CHECK10-NEXT:    [[TMP218:%.*]] = load i32*, i32** [[C]], align 8
16167 // CHECK10-NEXT:    [[TMP219:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 0
16168 // CHECK10-NEXT:    [[TMP220:%.*]] = bitcast i8** [[TMP219]] to i64*
16169 // CHECK10-NEXT:    store i64 [[TMP213]], i64* [[TMP220]], align 8
16170 // CHECK10-NEXT:    [[TMP221:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 0
16171 // CHECK10-NEXT:    [[TMP222:%.*]] = bitcast i8** [[TMP221]] to i64*
16172 // CHECK10-NEXT:    store i64 [[TMP213]], i64* [[TMP222]], align 8
16173 // CHECK10-NEXT:    [[TMP223:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS82]], i64 0, i64 0
16174 // CHECK10-NEXT:    store i8* null, i8** [[TMP223]], align 8
16175 // CHECK10-NEXT:    [[TMP224:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 1
16176 // CHECK10-NEXT:    [[TMP225:%.*]] = bitcast i8** [[TMP224]] to i64*
16177 // CHECK10-NEXT:    store i64 [[TMP215]], i64* [[TMP225]], align 8
16178 // CHECK10-NEXT:    [[TMP226:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 1
16179 // CHECK10-NEXT:    [[TMP227:%.*]] = bitcast i8** [[TMP226]] to i64*
16180 // CHECK10-NEXT:    store i64 [[TMP215]], i64* [[TMP227]], align 8
16181 // CHECK10-NEXT:    [[TMP228:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS82]], i64 0, i64 1
16182 // CHECK10-NEXT:    store i8* null, i8** [[TMP228]], align 8
16183 // CHECK10-NEXT:    [[TMP229:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 2
16184 // CHECK10-NEXT:    [[TMP230:%.*]] = bitcast i8** [[TMP229]] to i32**
16185 // CHECK10-NEXT:    store i32* [[TMP216]], i32** [[TMP230]], align 8
16186 // CHECK10-NEXT:    [[TMP231:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 2
16187 // CHECK10-NEXT:    [[TMP232:%.*]] = bitcast i8** [[TMP231]] to i32**
16188 // CHECK10-NEXT:    store i32* [[TMP216]], i32** [[TMP232]], align 8
16189 // CHECK10-NEXT:    [[TMP233:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS82]], i64 0, i64 2
16190 // CHECK10-NEXT:    store i8* null, i8** [[TMP233]], align 8
16191 // CHECK10-NEXT:    [[TMP234:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 3
16192 // CHECK10-NEXT:    [[TMP235:%.*]] = bitcast i8** [[TMP234]] to i32**
16193 // CHECK10-NEXT:    store i32* [[TMP217]], i32** [[TMP235]], align 8
16194 // CHECK10-NEXT:    [[TMP236:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 3
16195 // CHECK10-NEXT:    [[TMP237:%.*]] = bitcast i8** [[TMP236]] to i32**
16196 // CHECK10-NEXT:    store i32* [[TMP217]], i32** [[TMP237]], align 8
16197 // CHECK10-NEXT:    [[TMP238:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS82]], i64 0, i64 3
16198 // CHECK10-NEXT:    store i8* null, i8** [[TMP238]], align 8
16199 // CHECK10-NEXT:    [[TMP239:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 4
16200 // CHECK10-NEXT:    [[TMP240:%.*]] = bitcast i8** [[TMP239]] to i32**
16201 // CHECK10-NEXT:    store i32* [[TMP218]], i32** [[TMP240]], align 8
16202 // CHECK10-NEXT:    [[TMP241:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 4
16203 // CHECK10-NEXT:    [[TMP242:%.*]] = bitcast i8** [[TMP241]] to i32**
16204 // CHECK10-NEXT:    store i32* [[TMP218]], i32** [[TMP242]], align 8
16205 // CHECK10-NEXT:    [[TMP243:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS82]], i64 0, i64 4
16206 // CHECK10-NEXT:    store i8* null, i8** [[TMP243]], align 8
16207 // CHECK10-NEXT:    [[TMP244:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 0
16208 // CHECK10-NEXT:    [[TMP245:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 0
16209 // CHECK10-NEXT:    [[TMP246:%.*]] = load i32, i32* [[N]], align 4
16210 // CHECK10-NEXT:    store i32 [[TMP246]], i32* [[DOTCAPTURE_EXPR_84]], align 4
16211 // CHECK10-NEXT:    [[TMP247:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_84]], align 4
16212 // CHECK10-NEXT:    [[SUB86:%.*]] = sub nsw i32 [[TMP247]], 0
16213 // CHECK10-NEXT:    [[DIV87:%.*]] = sdiv i32 [[SUB86]], 1
16214 // CHECK10-NEXT:    [[SUB88:%.*]] = sub nsw i32 [[DIV87]], 1
16215 // CHECK10-NEXT:    store i32 [[SUB88]], i32* [[DOTCAPTURE_EXPR_85]], align 4
16216 // CHECK10-NEXT:    [[TMP248:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_85]], align 4
16217 // CHECK10-NEXT:    [[ADD89:%.*]] = add nsw i32 [[TMP248]], 1
16218 // CHECK10-NEXT:    [[TMP249:%.*]] = zext i32 [[ADD89]] to i64
16219 // CHECK10-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP249]])
16220 // CHECK10-NEXT:    [[TMP250:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l90.region_id, i32 5, i8** [[TMP244]], i8** [[TMP245]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.52, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.53, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
16221 // CHECK10-NEXT:    [[TMP251:%.*]] = icmp ne i32 [[TMP250]], 0
16222 // CHECK10-NEXT:    br i1 [[TMP251]], label [[OMP_OFFLOAD_FAILED90:%.*]], label [[OMP_OFFLOAD_CONT91:%.*]]
16223 // CHECK10:       omp_offload.failed90:
16224 // CHECK10-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l90(i64 [[TMP213]], i64 [[TMP215]], i32* [[TMP216]], i32* [[TMP217]], i32* [[TMP218]]) #[[ATTR2]]
16225 // CHECK10-NEXT:    br label [[OMP_OFFLOAD_CONT91]]
16226 // CHECK10:       omp_offload.cont91:
16227 // CHECK10-NEXT:    ret i32 0
16228 //
16229 //
16230 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l42
16231 // CHECK10-SAME: (i64 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
16232 // CHECK10-NEXT:  entry:
16233 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
16234 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
16235 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 8
16236 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 8
16237 // CHECK10-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
16238 // CHECK10-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
16239 // CHECK10-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 8
16240 // CHECK10-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 8
16241 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
16242 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i32* [[CONV]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
16243 // CHECK10-NEXT:    ret void
16244 //
16245 //
16246 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..26
16247 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
16248 // CHECK10-NEXT:  entry:
16249 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
16250 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
16251 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
16252 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
16253 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
16254 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
16255 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
16256 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
16257 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
16258 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
16259 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
16260 // CHECK10-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
16261 // CHECK10-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
16262 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
16263 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
16264 // CHECK10-NEXT:    [[I3:%.*]] = alloca i32, align 4
16265 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
16266 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
16267 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
16268 // CHECK10-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
16269 // CHECK10-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
16270 // CHECK10-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
16271 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
16272 // CHECK10-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
16273 // CHECK10-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
16274 // CHECK10-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
16275 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
16276 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
16277 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16278 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
16279 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
16280 // CHECK10-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
16281 // CHECK10-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
16282 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
16283 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16284 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
16285 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
16286 // CHECK10:       omp.precond.then:
16287 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
16288 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
16289 // CHECK10-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
16290 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
16291 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
16292 // CHECK10-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
16293 // CHECK10-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
16294 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
16295 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
16296 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
16297 // CHECK10-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
16298 // CHECK10-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
16299 // CHECK10:       cond.true:
16300 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
16301 // CHECK10-NEXT:    br label [[COND_END:%.*]]
16302 // CHECK10:       cond.false:
16303 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
16304 // CHECK10-NEXT:    br label [[COND_END]]
16305 // CHECK10:       cond.end:
16306 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
16307 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
16308 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
16309 // CHECK10-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
16310 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
16311 // CHECK10:       omp.inner.for.cond:
16312 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !62
16313 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !62
16314 // CHECK10-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
16315 // CHECK10-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
16316 // CHECK10:       omp.inner.for.body:
16317 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !62
16318 // CHECK10-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
16319 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !62
16320 // CHECK10-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
16321 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i32**, i32**, i32**)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], i32** [[TMP1]], i32** [[TMP2]], i32** [[TMP3]]), !llvm.access.group !62
16322 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
16323 // CHECK10:       omp.inner.for.inc:
16324 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !62
16325 // CHECK10-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !62
16326 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
16327 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !62
16328 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP63:![0-9]+]]
16329 // CHECK10:       omp.inner.for.end:
16330 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
16331 // CHECK10:       omp.loop.exit:
16332 // CHECK10-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
16333 // CHECK10-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
16334 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
16335 // CHECK10-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
16336 // CHECK10-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
16337 // CHECK10-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
16338 // CHECK10:       .omp.final.then:
16339 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16340 // CHECK10-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
16341 // CHECK10-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
16342 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
16343 // CHECK10-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
16344 // CHECK10-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
16345 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
16346 // CHECK10:       .omp.final.done:
16347 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
16348 // CHECK10:       omp.precond.end:
16349 // CHECK10-NEXT:    ret void
16350 //
16351 //
16352 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..27
16353 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
16354 // CHECK10-NEXT:  entry:
16355 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
16356 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
16357 // CHECK10-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
16358 // CHECK10-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
16359 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
16360 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
16361 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
16362 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
16363 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
16364 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
16365 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
16366 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
16367 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
16368 // CHECK10-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
16369 // CHECK10-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
16370 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
16371 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
16372 // CHECK10-NEXT:    [[I4:%.*]] = alloca i32, align 4
16373 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
16374 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
16375 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
16376 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
16377 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
16378 // CHECK10-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
16379 // CHECK10-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
16380 // CHECK10-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
16381 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
16382 // CHECK10-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
16383 // CHECK10-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
16384 // CHECK10-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
16385 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
16386 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
16387 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16388 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
16389 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
16390 // CHECK10-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
16391 // CHECK10-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
16392 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
16393 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16394 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
16395 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
16396 // CHECK10:       omp.precond.then:
16397 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
16398 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
16399 // CHECK10-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
16400 // CHECK10-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
16401 // CHECK10-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
16402 // CHECK10-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
16403 // CHECK10-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
16404 // CHECK10-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
16405 // CHECK10-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
16406 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
16407 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
16408 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
16409 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
16410 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
16411 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
16412 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
16413 // CHECK10-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
16414 // CHECK10-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
16415 // CHECK10:       cond.true:
16416 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
16417 // CHECK10-NEXT:    br label [[COND_END:%.*]]
16418 // CHECK10:       cond.false:
16419 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
16420 // CHECK10-NEXT:    br label [[COND_END]]
16421 // CHECK10:       cond.end:
16422 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
16423 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
16424 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
16425 // CHECK10-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
16426 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
16427 // CHECK10:       omp.inner.for.cond:
16428 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !65
16429 // CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !65
16430 // CHECK10-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
16431 // CHECK10-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
16432 // CHECK10:       omp.inner.for.body:
16433 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !65
16434 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
16435 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
16436 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !65
16437 // CHECK10-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[TMP2]], align 8, !llvm.access.group !65
16438 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !65
16439 // CHECK10-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
16440 // CHECK10-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i64 [[IDXPROM]]
16441 // CHECK10-NEXT:    [[TMP22:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !65
16442 // CHECK10-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[TMP3]], align 8, !llvm.access.group !65
16443 // CHECK10-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !65
16444 // CHECK10-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
16445 // CHECK10-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i64 [[IDXPROM7]]
16446 // CHECK10-NEXT:    [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX8]], align 4, !llvm.access.group !65
16447 // CHECK10-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP22]], [[TMP25]]
16448 // CHECK10-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP1]], align 8, !llvm.access.group !65
16449 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !65
16450 // CHECK10-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
16451 // CHECK10-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i64 [[IDXPROM10]]
16452 // CHECK10-NEXT:    store i32 [[ADD9]], i32* [[ARRAYIDX11]], align 4, !llvm.access.group !65
16453 // CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
16454 // CHECK10:       omp.body.continue:
16455 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
16456 // CHECK10:       omp.inner.for.inc:
16457 // CHECK10-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !65
16458 // CHECK10-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP28]], 1
16459 // CHECK10-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !65
16460 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP66:![0-9]+]]
16461 // CHECK10:       omp.inner.for.end:
16462 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
16463 // CHECK10:       omp.loop.exit:
16464 // CHECK10-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
16465 // CHECK10-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
16466 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
16467 // CHECK10-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
16468 // CHECK10-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
16469 // CHECK10-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
16470 // CHECK10:       .omp.final.then:
16471 // CHECK10-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16472 // CHECK10-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP33]], 0
16473 // CHECK10-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
16474 // CHECK10-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
16475 // CHECK10-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
16476 // CHECK10-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
16477 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
16478 // CHECK10:       .omp.final.done:
16479 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
16480 // CHECK10:       omp.precond.end:
16481 // CHECK10-NEXT:    ret void
16482 //
16483 //
16484 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l50
16485 // CHECK10-SAME: (i64 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
16486 // CHECK10-NEXT:  entry:
16487 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
16488 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
16489 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 8
16490 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 8
16491 // CHECK10-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
16492 // CHECK10-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
16493 // CHECK10-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 8
16494 // CHECK10-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 8
16495 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
16496 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i32* [[CONV]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
16497 // CHECK10-NEXT:    ret void
16498 //
16499 //
16500 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..30
16501 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
16502 // CHECK10-NEXT:  entry:
16503 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
16504 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
16505 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
16506 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
16507 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
16508 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
16509 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
16510 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
16511 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
16512 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
16513 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
16514 // CHECK10-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
16515 // CHECK10-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
16516 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
16517 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
16518 // CHECK10-NEXT:    [[I3:%.*]] = alloca i32, align 4
16519 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
16520 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
16521 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
16522 // CHECK10-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
16523 // CHECK10-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
16524 // CHECK10-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
16525 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
16526 // CHECK10-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
16527 // CHECK10-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
16528 // CHECK10-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
16529 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
16530 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
16531 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16532 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
16533 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
16534 // CHECK10-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
16535 // CHECK10-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
16536 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
16537 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16538 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
16539 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
16540 // CHECK10:       omp.precond.then:
16541 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
16542 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
16543 // CHECK10-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
16544 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
16545 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
16546 // CHECK10-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
16547 // CHECK10-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
16548 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
16549 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
16550 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
16551 // CHECK10-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
16552 // CHECK10-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
16553 // CHECK10:       cond.true:
16554 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
16555 // CHECK10-NEXT:    br label [[COND_END:%.*]]
16556 // CHECK10:       cond.false:
16557 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
16558 // CHECK10-NEXT:    br label [[COND_END]]
16559 // CHECK10:       cond.end:
16560 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
16561 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
16562 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
16563 // CHECK10-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
16564 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
16565 // CHECK10:       omp.inner.for.cond:
16566 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !68
16567 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !68
16568 // CHECK10-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
16569 // CHECK10-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
16570 // CHECK10:       omp.inner.for.body:
16571 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !68
16572 // CHECK10-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
16573 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !68
16574 // CHECK10-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
16575 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i32**, i32**, i32**)* @.omp_outlined..31 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], i32** [[TMP1]], i32** [[TMP2]], i32** [[TMP3]]), !llvm.access.group !68
16576 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
16577 // CHECK10:       omp.inner.for.inc:
16578 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !68
16579 // CHECK10-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !68
16580 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
16581 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !68
16582 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP69:![0-9]+]]
16583 // CHECK10:       omp.inner.for.end:
16584 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
16585 // CHECK10:       omp.loop.exit:
16586 // CHECK10-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
16587 // CHECK10-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
16588 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
16589 // CHECK10-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
16590 // CHECK10-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
16591 // CHECK10-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
16592 // CHECK10:       .omp.final.then:
16593 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16594 // CHECK10-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
16595 // CHECK10-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
16596 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
16597 // CHECK10-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
16598 // CHECK10-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
16599 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
16600 // CHECK10:       .omp.final.done:
16601 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
16602 // CHECK10:       omp.precond.end:
16603 // CHECK10-NEXT:    ret void
16604 //
16605 //
16606 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..31
16607 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
16608 // CHECK10-NEXT:  entry:
16609 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
16610 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
16611 // CHECK10-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
16612 // CHECK10-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
16613 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
16614 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
16615 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
16616 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
16617 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
16618 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
16619 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
16620 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
16621 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
16622 // CHECK10-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
16623 // CHECK10-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
16624 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
16625 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
16626 // CHECK10-NEXT:    [[I4:%.*]] = alloca i32, align 4
16627 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
16628 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
16629 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
16630 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
16631 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
16632 // CHECK10-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
16633 // CHECK10-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
16634 // CHECK10-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
16635 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
16636 // CHECK10-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
16637 // CHECK10-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
16638 // CHECK10-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
16639 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
16640 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
16641 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16642 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
16643 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
16644 // CHECK10-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
16645 // CHECK10-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
16646 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
16647 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16648 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
16649 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
16650 // CHECK10:       omp.precond.then:
16651 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
16652 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
16653 // CHECK10-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
16654 // CHECK10-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
16655 // CHECK10-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
16656 // CHECK10-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
16657 // CHECK10-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
16658 // CHECK10-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
16659 // CHECK10-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
16660 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
16661 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
16662 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
16663 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
16664 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
16665 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
16666 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
16667 // CHECK10-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
16668 // CHECK10-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
16669 // CHECK10:       cond.true:
16670 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
16671 // CHECK10-NEXT:    br label [[COND_END:%.*]]
16672 // CHECK10:       cond.false:
16673 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
16674 // CHECK10-NEXT:    br label [[COND_END]]
16675 // CHECK10:       cond.end:
16676 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
16677 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
16678 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
16679 // CHECK10-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
16680 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
16681 // CHECK10:       omp.inner.for.cond:
16682 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !71
16683 // CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !71
16684 // CHECK10-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
16685 // CHECK10-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
16686 // CHECK10:       omp.inner.for.body:
16687 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !71
16688 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
16689 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
16690 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !71
16691 // CHECK10-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[TMP2]], align 8, !llvm.access.group !71
16692 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !71
16693 // CHECK10-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
16694 // CHECK10-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i64 [[IDXPROM]]
16695 // CHECK10-NEXT:    [[TMP22:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !71
16696 // CHECK10-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[TMP3]], align 8, !llvm.access.group !71
16697 // CHECK10-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !71
16698 // CHECK10-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
16699 // CHECK10-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i64 [[IDXPROM7]]
16700 // CHECK10-NEXT:    [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX8]], align 4, !llvm.access.group !71
16701 // CHECK10-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP22]], [[TMP25]]
16702 // CHECK10-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP1]], align 8, !llvm.access.group !71
16703 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !71
16704 // CHECK10-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
16705 // CHECK10-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i64 [[IDXPROM10]]
16706 // CHECK10-NEXT:    store i32 [[ADD9]], i32* [[ARRAYIDX11]], align 4, !llvm.access.group !71
16707 // CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
16708 // CHECK10:       omp.body.continue:
16709 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
16710 // CHECK10:       omp.inner.for.inc:
16711 // CHECK10-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !71
16712 // CHECK10-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP28]], 1
16713 // CHECK10-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !71
16714 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP72:![0-9]+]]
16715 // CHECK10:       omp.inner.for.end:
16716 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
16717 // CHECK10:       omp.loop.exit:
16718 // CHECK10-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
16719 // CHECK10-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
16720 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
16721 // CHECK10-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
16722 // CHECK10-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
16723 // CHECK10-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
16724 // CHECK10:       .omp.final.then:
16725 // CHECK10-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16726 // CHECK10-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP33]], 0
16727 // CHECK10-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
16728 // CHECK10-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
16729 // CHECK10-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
16730 // CHECK10-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
16731 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
16732 // CHECK10:       .omp.final.done:
16733 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
16734 // CHECK10:       omp.precond.end:
16735 // CHECK10-NEXT:    ret void
16736 //
16737 //
16738 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l58
16739 // CHECK10-SAME: (i64 [[CH:%.*]], i64 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
16740 // CHECK10-NEXT:  entry:
16741 // CHECK10-NEXT:    [[CH_ADDR:%.*]] = alloca i64, align 8
16742 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
16743 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
16744 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 8
16745 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 8
16746 // CHECK10-NEXT:    store i64 [[CH]], i64* [[CH_ADDR]], align 8
16747 // CHECK10-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
16748 // CHECK10-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
16749 // CHECK10-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 8
16750 // CHECK10-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 8
16751 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
16752 // CHECK10-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
16753 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..34 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
16754 // CHECK10-NEXT:    ret void
16755 //
16756 //
16757 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..34
16758 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
16759 // CHECK10-NEXT:  entry:
16760 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
16761 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
16762 // CHECK10-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 8
16763 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
16764 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
16765 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
16766 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
16767 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
16768 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
16769 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
16770 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
16771 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
16772 // CHECK10-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
16773 // CHECK10-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
16774 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
16775 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
16776 // CHECK10-NEXT:    [[I3:%.*]] = alloca i32, align 4
16777 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
16778 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
16779 // CHECK10-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 8
16780 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
16781 // CHECK10-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
16782 // CHECK10-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
16783 // CHECK10-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
16784 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
16785 // CHECK10-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
16786 // CHECK10-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
16787 // CHECK10-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
16788 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
16789 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP1]], align 4
16790 // CHECK10-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
16791 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16792 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP6]], 0
16793 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
16794 // CHECK10-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
16795 // CHECK10-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
16796 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
16797 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16798 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP7]]
16799 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
16800 // CHECK10:       omp.precond.then:
16801 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
16802 // CHECK10-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
16803 // CHECK10-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_COMB_UB]], align 4
16804 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
16805 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
16806 // CHECK10-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP0]], align 4
16807 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
16808 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
16809 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP9]])
16810 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
16811 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
16812 // CHECK10-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
16813 // CHECK10-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
16814 // CHECK10:       cond.true:
16815 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
16816 // CHECK10-NEXT:    br label [[COND_END:%.*]]
16817 // CHECK10:       cond.false:
16818 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
16819 // CHECK10-NEXT:    br label [[COND_END]]
16820 // CHECK10:       cond.end:
16821 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
16822 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
16823 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
16824 // CHECK10-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
16825 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
16826 // CHECK10:       omp.inner.for.cond:
16827 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !74
16828 // CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !74
16829 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], 1
16830 // CHECK10-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP17]], [[ADD]]
16831 // CHECK10-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
16832 // CHECK10:       omp.inner.for.body:
16833 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !74
16834 // CHECK10-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
16835 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !74
16836 // CHECK10-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
16837 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i32**, i32**, i32**)* @.omp_outlined..35 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], i32** [[TMP2]], i32** [[TMP3]], i32** [[TMP4]]), !llvm.access.group !74
16838 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
16839 // CHECK10:       omp.inner.for.inc:
16840 // CHECK10-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !74
16841 // CHECK10-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !74
16842 // CHECK10-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
16843 // CHECK10-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !74
16844 // CHECK10-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !74
16845 // CHECK10-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !74
16846 // CHECK10-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
16847 // CHECK10-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !74
16848 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !74
16849 // CHECK10-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !74
16850 // CHECK10-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP27]], [[TMP28]]
16851 // CHECK10-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !74
16852 // CHECK10-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !74
16853 // CHECK10-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !74
16854 // CHECK10-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[TMP29]], [[TMP30]]
16855 // CHECK10-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
16856 // CHECK10:       cond.true10:
16857 // CHECK10-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !74
16858 // CHECK10-NEXT:    br label [[COND_END12:%.*]]
16859 // CHECK10:       cond.false11:
16860 // CHECK10-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !74
16861 // CHECK10-NEXT:    br label [[COND_END12]]
16862 // CHECK10:       cond.end12:
16863 // CHECK10-NEXT:    [[COND13:%.*]] = phi i32 [ [[TMP31]], [[COND_TRUE10]] ], [ [[TMP32]], [[COND_FALSE11]] ]
16864 // CHECK10-NEXT:    store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !74
16865 // CHECK10-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !74
16866 // CHECK10-NEXT:    store i32 [[TMP33]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !74
16867 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP75:![0-9]+]]
16868 // CHECK10:       omp.inner.for.end:
16869 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
16870 // CHECK10:       omp.loop.exit:
16871 // CHECK10-NEXT:    [[TMP34:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
16872 // CHECK10-NEXT:    [[TMP35:%.*]] = load i32, i32* [[TMP34]], align 4
16873 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP35]])
16874 // CHECK10-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
16875 // CHECK10-NEXT:    [[TMP37:%.*]] = icmp ne i32 [[TMP36]], 0
16876 // CHECK10-NEXT:    br i1 [[TMP37]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
16877 // CHECK10:       .omp.final.then:
16878 // CHECK10-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16879 // CHECK10-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP38]], 0
16880 // CHECK10-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
16881 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV15]], 1
16882 // CHECK10-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL]]
16883 // CHECK10-NEXT:    store i32 [[ADD16]], i32* [[I3]], align 4
16884 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
16885 // CHECK10:       .omp.final.done:
16886 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
16887 // CHECK10:       omp.precond.end:
16888 // CHECK10-NEXT:    ret void
16889 //
16890 //
16891 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..35
16892 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
16893 // CHECK10-NEXT:  entry:
16894 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
16895 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
16896 // CHECK10-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
16897 // CHECK10-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
16898 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
16899 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
16900 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
16901 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
16902 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
16903 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
16904 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
16905 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
16906 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
16907 // CHECK10-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
16908 // CHECK10-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
16909 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
16910 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
16911 // CHECK10-NEXT:    [[I4:%.*]] = alloca i32, align 4
16912 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
16913 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
16914 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
16915 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
16916 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
16917 // CHECK10-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
16918 // CHECK10-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
16919 // CHECK10-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
16920 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
16921 // CHECK10-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
16922 // CHECK10-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
16923 // CHECK10-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
16924 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
16925 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
16926 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16927 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
16928 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
16929 // CHECK10-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
16930 // CHECK10-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
16931 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
16932 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16933 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
16934 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
16935 // CHECK10:       omp.precond.then:
16936 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
16937 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
16938 // CHECK10-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
16939 // CHECK10-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
16940 // CHECK10-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
16941 // CHECK10-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
16942 // CHECK10-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
16943 // CHECK10-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
16944 // CHECK10-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
16945 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
16946 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
16947 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
16948 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
16949 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
16950 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
16951 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
16952 // CHECK10-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
16953 // CHECK10-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
16954 // CHECK10:       cond.true:
16955 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
16956 // CHECK10-NEXT:    br label [[COND_END:%.*]]
16957 // CHECK10:       cond.false:
16958 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
16959 // CHECK10-NEXT:    br label [[COND_END]]
16960 // CHECK10:       cond.end:
16961 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
16962 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
16963 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
16964 // CHECK10-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
16965 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
16966 // CHECK10:       omp.inner.for.cond:
16967 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !77
16968 // CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !77
16969 // CHECK10-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
16970 // CHECK10-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
16971 // CHECK10:       omp.inner.for.body:
16972 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !77
16973 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
16974 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
16975 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !77
16976 // CHECK10-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[TMP2]], align 8, !llvm.access.group !77
16977 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !77
16978 // CHECK10-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
16979 // CHECK10-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i64 [[IDXPROM]]
16980 // CHECK10-NEXT:    [[TMP22:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !77
16981 // CHECK10-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[TMP3]], align 8, !llvm.access.group !77
16982 // CHECK10-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !77
16983 // CHECK10-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
16984 // CHECK10-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i64 [[IDXPROM7]]
16985 // CHECK10-NEXT:    [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX8]], align 4, !llvm.access.group !77
16986 // CHECK10-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP22]], [[TMP25]]
16987 // CHECK10-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP1]], align 8, !llvm.access.group !77
16988 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !77
16989 // CHECK10-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
16990 // CHECK10-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i64 [[IDXPROM10]]
16991 // CHECK10-NEXT:    store i32 [[ADD9]], i32* [[ARRAYIDX11]], align 4, !llvm.access.group !77
16992 // CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
16993 // CHECK10:       omp.body.continue:
16994 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
16995 // CHECK10:       omp.inner.for.inc:
16996 // CHECK10-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !77
16997 // CHECK10-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP28]], 1
16998 // CHECK10-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !77
16999 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP78:![0-9]+]]
17000 // CHECK10:       omp.inner.for.end:
17001 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
17002 // CHECK10:       omp.loop.exit:
17003 // CHECK10-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
17004 // CHECK10-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
17005 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
17006 // CHECK10-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
17007 // CHECK10-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
17008 // CHECK10-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
17009 // CHECK10:       .omp.final.then:
17010 // CHECK10-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17011 // CHECK10-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP33]], 0
17012 // CHECK10-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
17013 // CHECK10-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
17014 // CHECK10-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
17015 // CHECK10-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
17016 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
17017 // CHECK10:       .omp.final.done:
17018 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
17019 // CHECK10:       omp.precond.end:
17020 // CHECK10-NEXT:    ret void
17021 //
17022 //
17023 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l66
17024 // CHECK10-SAME: (i64 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
17025 // CHECK10-NEXT:  entry:
17026 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
17027 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
17028 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 8
17029 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 8
17030 // CHECK10-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
17031 // CHECK10-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
17032 // CHECK10-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 8
17033 // CHECK10-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 8
17034 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
17035 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..38 to void (i32*, i32*, ...)*), i32* [[CONV]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
17036 // CHECK10-NEXT:    ret void
17037 //
17038 //
17039 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..38
17040 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
17041 // CHECK10-NEXT:  entry:
17042 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
17043 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
17044 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
17045 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
17046 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
17047 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
17048 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
17049 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
17050 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
17051 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
17052 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
17053 // CHECK10-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
17054 // CHECK10-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
17055 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
17056 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
17057 // CHECK10-NEXT:    [[I3:%.*]] = alloca i32, align 4
17058 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
17059 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
17060 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
17061 // CHECK10-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
17062 // CHECK10-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
17063 // CHECK10-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
17064 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
17065 // CHECK10-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
17066 // CHECK10-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
17067 // CHECK10-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
17068 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
17069 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
17070 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17071 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
17072 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
17073 // CHECK10-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
17074 // CHECK10-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
17075 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
17076 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17077 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
17078 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
17079 // CHECK10:       omp.precond.then:
17080 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
17081 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17082 // CHECK10-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
17083 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
17084 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
17085 // CHECK10-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
17086 // CHECK10-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
17087 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
17088 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
17089 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17090 // CHECK10-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
17091 // CHECK10-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
17092 // CHECK10:       cond.true:
17093 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17094 // CHECK10-NEXT:    br label [[COND_END:%.*]]
17095 // CHECK10:       cond.false:
17096 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
17097 // CHECK10-NEXT:    br label [[COND_END]]
17098 // CHECK10:       cond.end:
17099 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
17100 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
17101 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
17102 // CHECK10-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
17103 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
17104 // CHECK10:       omp.inner.for.cond:
17105 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !80
17106 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !80
17107 // CHECK10-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
17108 // CHECK10-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
17109 // CHECK10:       omp.inner.for.body:
17110 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !80
17111 // CHECK10-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
17112 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !80
17113 // CHECK10-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
17114 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i32**, i32**, i32**)* @.omp_outlined..39 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], i32** [[TMP1]], i32** [[TMP2]], i32** [[TMP3]]), !llvm.access.group !80
17115 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
17116 // CHECK10:       omp.inner.for.inc:
17117 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !80
17118 // CHECK10-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !80
17119 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
17120 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !80
17121 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP81:![0-9]+]]
17122 // CHECK10:       omp.inner.for.end:
17123 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
17124 // CHECK10:       omp.loop.exit:
17125 // CHECK10-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
17126 // CHECK10-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
17127 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
17128 // CHECK10-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
17129 // CHECK10-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
17130 // CHECK10-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
17131 // CHECK10:       .omp.final.then:
17132 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17133 // CHECK10-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
17134 // CHECK10-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
17135 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
17136 // CHECK10-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
17137 // CHECK10-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
17138 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
17139 // CHECK10:       .omp.final.done:
17140 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
17141 // CHECK10:       omp.precond.end:
17142 // CHECK10-NEXT:    ret void
17143 //
17144 //
17145 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..39
17146 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
17147 // CHECK10-NEXT:  entry:
17148 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
17149 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
17150 // CHECK10-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
17151 // CHECK10-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
17152 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
17153 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
17154 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
17155 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
17156 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
17157 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
17158 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
17159 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
17160 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
17161 // CHECK10-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
17162 // CHECK10-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
17163 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
17164 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
17165 // CHECK10-NEXT:    [[I4:%.*]] = alloca i32, align 4
17166 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
17167 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
17168 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
17169 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
17170 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
17171 // CHECK10-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
17172 // CHECK10-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
17173 // CHECK10-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
17174 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
17175 // CHECK10-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
17176 // CHECK10-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
17177 // CHECK10-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
17178 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
17179 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
17180 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17181 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
17182 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
17183 // CHECK10-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
17184 // CHECK10-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
17185 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
17186 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17187 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
17188 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
17189 // CHECK10:       omp.precond.then:
17190 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
17191 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17192 // CHECK10-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
17193 // CHECK10-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
17194 // CHECK10-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
17195 // CHECK10-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
17196 // CHECK10-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
17197 // CHECK10-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
17198 // CHECK10-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
17199 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
17200 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
17201 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
17202 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
17203 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
17204 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
17205 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17206 // CHECK10-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
17207 // CHECK10-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
17208 // CHECK10:       cond.true:
17209 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17210 // CHECK10-NEXT:    br label [[COND_END:%.*]]
17211 // CHECK10:       cond.false:
17212 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
17213 // CHECK10-NEXT:    br label [[COND_END]]
17214 // CHECK10:       cond.end:
17215 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
17216 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
17217 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
17218 // CHECK10-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
17219 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
17220 // CHECK10:       omp.inner.for.cond:
17221 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !83
17222 // CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !83
17223 // CHECK10-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
17224 // CHECK10-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
17225 // CHECK10:       omp.inner.for.body:
17226 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !83
17227 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
17228 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
17229 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !83
17230 // CHECK10-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[TMP2]], align 8, !llvm.access.group !83
17231 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !83
17232 // CHECK10-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
17233 // CHECK10-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i64 [[IDXPROM]]
17234 // CHECK10-NEXT:    [[TMP22:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !83
17235 // CHECK10-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[TMP3]], align 8, !llvm.access.group !83
17236 // CHECK10-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !83
17237 // CHECK10-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
17238 // CHECK10-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i64 [[IDXPROM7]]
17239 // CHECK10-NEXT:    [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX8]], align 4, !llvm.access.group !83
17240 // CHECK10-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP22]], [[TMP25]]
17241 // CHECK10-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP1]], align 8, !llvm.access.group !83
17242 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !83
17243 // CHECK10-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
17244 // CHECK10-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i64 [[IDXPROM10]]
17245 // CHECK10-NEXT:    store i32 [[ADD9]], i32* [[ARRAYIDX11]], align 4, !llvm.access.group !83
17246 // CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
17247 // CHECK10:       omp.body.continue:
17248 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
17249 // CHECK10:       omp.inner.for.inc:
17250 // CHECK10-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !83
17251 // CHECK10-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP28]], 1
17252 // CHECK10-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !83
17253 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP84:![0-9]+]]
17254 // CHECK10:       omp.inner.for.end:
17255 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
17256 // CHECK10:       omp.loop.exit:
17257 // CHECK10-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
17258 // CHECK10-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
17259 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
17260 // CHECK10-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
17261 // CHECK10-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
17262 // CHECK10-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
17263 // CHECK10:       .omp.final.then:
17264 // CHECK10-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17265 // CHECK10-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP33]], 0
17266 // CHECK10-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
17267 // CHECK10-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
17268 // CHECK10-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
17269 // CHECK10-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
17270 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
17271 // CHECK10:       .omp.final.done:
17272 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
17273 // CHECK10:       omp.precond.end:
17274 // CHECK10-NEXT:    ret void
17275 //
17276 //
17277 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l74
17278 // CHECK10-SAME: (i64 [[CH:%.*]], i64 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
17279 // CHECK10-NEXT:  entry:
17280 // CHECK10-NEXT:    [[CH_ADDR:%.*]] = alloca i64, align 8
17281 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
17282 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
17283 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 8
17284 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 8
17285 // CHECK10-NEXT:    store i64 [[CH]], i64* [[CH_ADDR]], align 8
17286 // CHECK10-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
17287 // CHECK10-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
17288 // CHECK10-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 8
17289 // CHECK10-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 8
17290 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
17291 // CHECK10-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
17292 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..42 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
17293 // CHECK10-NEXT:    ret void
17294 //
17295 //
17296 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..42
17297 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
17298 // CHECK10-NEXT:  entry:
17299 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
17300 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
17301 // CHECK10-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 8
17302 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
17303 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
17304 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
17305 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
17306 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
17307 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
17308 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
17309 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
17310 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
17311 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
17312 // CHECK10-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
17313 // CHECK10-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
17314 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
17315 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
17316 // CHECK10-NEXT:    [[I4:%.*]] = alloca i32, align 4
17317 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
17318 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
17319 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
17320 // CHECK10-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 8
17321 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
17322 // CHECK10-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
17323 // CHECK10-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
17324 // CHECK10-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
17325 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
17326 // CHECK10-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
17327 // CHECK10-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
17328 // CHECK10-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
17329 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
17330 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
17331 // CHECK10-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
17332 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
17333 // CHECK10-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
17334 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17335 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
17336 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
17337 // CHECK10-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
17338 // CHECK10-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
17339 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
17340 // CHECK10-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17341 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
17342 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
17343 // CHECK10:       omp.precond.then:
17344 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
17345 // CHECK10-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
17346 // CHECK10-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
17347 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
17348 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
17349 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
17350 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
17351 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
17352 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
17353 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
17354 // CHECK10-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
17355 // CHECK10-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
17356 // CHECK10:       cond.true:
17357 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
17358 // CHECK10-NEXT:    br label [[COND_END:%.*]]
17359 // CHECK10:       cond.false:
17360 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
17361 // CHECK10-NEXT:    br label [[COND_END]]
17362 // CHECK10:       cond.end:
17363 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
17364 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
17365 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
17366 // CHECK10-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
17367 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
17368 // CHECK10:       omp.inner.for.cond:
17369 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !86
17370 // CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !86
17371 // CHECK10-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
17372 // CHECK10-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
17373 // CHECK10:       omp.inner.for.body:
17374 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !86
17375 // CHECK10-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
17376 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !86
17377 // CHECK10-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
17378 // CHECK10-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !86
17379 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
17380 // CHECK10-NEXT:    store i32 [[TMP23]], i32* [[CONV]], align 4, !llvm.access.group !86
17381 // CHECK10-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !86
17382 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i32**, i32**, i32**, i64)* @.omp_outlined..43 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], i32** [[TMP2]], i32** [[TMP3]], i32** [[TMP4]], i64 [[TMP24]]), !llvm.access.group !86
17383 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
17384 // CHECK10:       omp.inner.for.inc:
17385 // CHECK10-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !86
17386 // CHECK10-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !86
17387 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
17388 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !86
17389 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP87:![0-9]+]]
17390 // CHECK10:       omp.inner.for.end:
17391 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
17392 // CHECK10:       omp.loop.exit:
17393 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
17394 // CHECK10-NEXT:    [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
17395 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP28]])
17396 // CHECK10-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
17397 // CHECK10-NEXT:    [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
17398 // CHECK10-NEXT:    br i1 [[TMP30]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
17399 // CHECK10:       .omp.final.then:
17400 // CHECK10-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17401 // CHECK10-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP31]], 0
17402 // CHECK10-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
17403 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
17404 // CHECK10-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
17405 // CHECK10-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
17406 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
17407 // CHECK10:       .omp.final.done:
17408 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
17409 // CHECK10:       omp.precond.end:
17410 // CHECK10-NEXT:    ret void
17411 //
17412 //
17413 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..43
17414 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
17415 // CHECK10-NEXT:  entry:
17416 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
17417 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
17418 // CHECK10-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
17419 // CHECK10-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
17420 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
17421 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
17422 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
17423 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
17424 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
17425 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
17426 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
17427 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
17428 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
17429 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
17430 // CHECK10-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
17431 // CHECK10-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
17432 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
17433 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
17434 // CHECK10-NEXT:    [[I6:%.*]] = alloca i32, align 4
17435 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
17436 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
17437 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
17438 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
17439 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
17440 // CHECK10-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
17441 // CHECK10-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
17442 // CHECK10-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
17443 // CHECK10-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
17444 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
17445 // CHECK10-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
17446 // CHECK10-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
17447 // CHECK10-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
17448 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
17449 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
17450 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
17451 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17452 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
17453 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
17454 // CHECK10-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
17455 // CHECK10-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
17456 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
17457 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17458 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
17459 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
17460 // CHECK10:       omp.precond.then:
17461 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
17462 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
17463 // CHECK10-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
17464 // CHECK10-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
17465 // CHECK10-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
17466 // CHECK10-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
17467 // CHECK10-NEXT:    [[CONV5:%.*]] = trunc i64 [[TMP9]] to i32
17468 // CHECK10-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_LB]], align 4
17469 // CHECK10-NEXT:    store i32 [[CONV5]], i32* [[DOTOMP_UB]], align 4
17470 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
17471 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
17472 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32, i32* [[CONV]], align 8
17473 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
17474 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
17475 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP12]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP10]])
17476 // CHECK10-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
17477 // CHECK10:       omp.dispatch.cond:
17478 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
17479 // CHECK10-NEXT:    [[CONV7:%.*]] = sext i32 [[TMP13]] to i64
17480 // CHECK10-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
17481 // CHECK10-NEXT:    [[CMP8:%.*]] = icmp ugt i64 [[CONV7]], [[TMP14]]
17482 // CHECK10-NEXT:    br i1 [[CMP8]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
17483 // CHECK10:       cond.true:
17484 // CHECK10-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
17485 // CHECK10-NEXT:    br label [[COND_END:%.*]]
17486 // CHECK10:       cond.false:
17487 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
17488 // CHECK10-NEXT:    [[CONV9:%.*]] = sext i32 [[TMP16]] to i64
17489 // CHECK10-NEXT:    br label [[COND_END]]
17490 // CHECK10:       cond.end:
17491 // CHECK10-NEXT:    [[COND:%.*]] = phi i64 [ [[TMP15]], [[COND_TRUE]] ], [ [[CONV9]], [[COND_FALSE]] ]
17492 // CHECK10-NEXT:    [[CONV10:%.*]] = trunc i64 [[COND]] to i32
17493 // CHECK10-NEXT:    store i32 [[CONV10]], i32* [[DOTOMP_UB]], align 4
17494 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
17495 // CHECK10-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
17496 // CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
17497 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
17498 // CHECK10-NEXT:    [[CMP11:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
17499 // CHECK10-NEXT:    br i1 [[CMP11]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
17500 // CHECK10:       omp.dispatch.body:
17501 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
17502 // CHECK10:       omp.inner.for.cond:
17503 // CHECK10-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !89
17504 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !89
17505 // CHECK10-NEXT:    [[CMP12:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
17506 // CHECK10-NEXT:    br i1 [[CMP12]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
17507 // CHECK10:       omp.inner.for.body:
17508 // CHECK10-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !89
17509 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP22]], 1
17510 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
17511 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[I6]], align 4, !llvm.access.group !89
17512 // CHECK10-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[TMP2]], align 8, !llvm.access.group !89
17513 // CHECK10-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !89
17514 // CHECK10-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP24]] to i64
17515 // CHECK10-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i64 [[IDXPROM]]
17516 // CHECK10-NEXT:    [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !89
17517 // CHECK10-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP3]], align 8, !llvm.access.group !89
17518 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !89
17519 // CHECK10-NEXT:    [[IDXPROM13:%.*]] = sext i32 [[TMP27]] to i64
17520 // CHECK10-NEXT:    [[ARRAYIDX14:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i64 [[IDXPROM13]]
17521 // CHECK10-NEXT:    [[TMP28:%.*]] = load i32, i32* [[ARRAYIDX14]], align 4, !llvm.access.group !89
17522 // CHECK10-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP25]], [[TMP28]]
17523 // CHECK10-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[TMP1]], align 8, !llvm.access.group !89
17524 // CHECK10-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !89
17525 // CHECK10-NEXT:    [[IDXPROM16:%.*]] = sext i32 [[TMP30]] to i64
17526 // CHECK10-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds i32, i32* [[TMP29]], i64 [[IDXPROM16]]
17527 // CHECK10-NEXT:    store i32 [[ADD15]], i32* [[ARRAYIDX17]], align 4, !llvm.access.group !89
17528 // CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
17529 // CHECK10:       omp.body.continue:
17530 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
17531 // CHECK10:       omp.inner.for.inc:
17532 // CHECK10-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !89
17533 // CHECK10-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP31]], 1
17534 // CHECK10-NEXT:    store i32 [[ADD18]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !89
17535 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP90:![0-9]+]]
17536 // CHECK10:       omp.inner.for.end:
17537 // CHECK10-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
17538 // CHECK10:       omp.dispatch.inc:
17539 // CHECK10-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
17540 // CHECK10-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
17541 // CHECK10-NEXT:    [[ADD19:%.*]] = add nsw i32 [[TMP32]], [[TMP33]]
17542 // CHECK10-NEXT:    store i32 [[ADD19]], i32* [[DOTOMP_LB]], align 4
17543 // CHECK10-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
17544 // CHECK10-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
17545 // CHECK10-NEXT:    [[ADD20:%.*]] = add nsw i32 [[TMP34]], [[TMP35]]
17546 // CHECK10-NEXT:    store i32 [[ADD20]], i32* [[DOTOMP_UB]], align 4
17547 // CHECK10-NEXT:    br label [[OMP_DISPATCH_COND]]
17548 // CHECK10:       omp.dispatch.end:
17549 // CHECK10-NEXT:    [[TMP36:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
17550 // CHECK10-NEXT:    [[TMP37:%.*]] = load i32, i32* [[TMP36]], align 4
17551 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP37]])
17552 // CHECK10-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
17553 // CHECK10-NEXT:    [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
17554 // CHECK10-NEXT:    br i1 [[TMP39]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
17555 // CHECK10:       .omp.final.then:
17556 // CHECK10-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17557 // CHECK10-NEXT:    [[SUB21:%.*]] = sub nsw i32 [[TMP40]], 0
17558 // CHECK10-NEXT:    [[DIV22:%.*]] = sdiv i32 [[SUB21]], 1
17559 // CHECK10-NEXT:    [[MUL23:%.*]] = mul nsw i32 [[DIV22]], 1
17560 // CHECK10-NEXT:    [[ADD24:%.*]] = add nsw i32 0, [[MUL23]]
17561 // CHECK10-NEXT:    store i32 [[ADD24]], i32* [[I6]], align 4
17562 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
17563 // CHECK10:       .omp.final.done:
17564 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
17565 // CHECK10:       omp.precond.end:
17566 // CHECK10-NEXT:    ret void
17567 //
17568 //
17569 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l82
17570 // CHECK10-SAME: (i64 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
17571 // CHECK10-NEXT:  entry:
17572 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
17573 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
17574 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 8
17575 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 8
17576 // CHECK10-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
17577 // CHECK10-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
17578 // CHECK10-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 8
17579 // CHECK10-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 8
17580 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
17581 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..46 to void (i32*, i32*, ...)*), i32* [[CONV]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
17582 // CHECK10-NEXT:    ret void
17583 //
17584 //
17585 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..46
17586 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
17587 // CHECK10-NEXT:  entry:
17588 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
17589 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
17590 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
17591 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
17592 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
17593 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
17594 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
17595 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
17596 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
17597 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
17598 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
17599 // CHECK10-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
17600 // CHECK10-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
17601 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
17602 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
17603 // CHECK10-NEXT:    [[I3:%.*]] = alloca i32, align 4
17604 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
17605 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
17606 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
17607 // CHECK10-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
17608 // CHECK10-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
17609 // CHECK10-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
17610 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
17611 // CHECK10-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
17612 // CHECK10-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
17613 // CHECK10-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
17614 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
17615 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
17616 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17617 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
17618 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
17619 // CHECK10-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
17620 // CHECK10-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
17621 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
17622 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17623 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
17624 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
17625 // CHECK10:       omp.precond.then:
17626 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
17627 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17628 // CHECK10-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
17629 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
17630 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
17631 // CHECK10-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
17632 // CHECK10-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
17633 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
17634 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
17635 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17636 // CHECK10-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
17637 // CHECK10-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
17638 // CHECK10:       cond.true:
17639 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17640 // CHECK10-NEXT:    br label [[COND_END:%.*]]
17641 // CHECK10:       cond.false:
17642 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
17643 // CHECK10-NEXT:    br label [[COND_END]]
17644 // CHECK10:       cond.end:
17645 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
17646 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
17647 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
17648 // CHECK10-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
17649 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
17650 // CHECK10:       omp.inner.for.cond:
17651 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !92
17652 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !92
17653 // CHECK10-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
17654 // CHECK10-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
17655 // CHECK10:       omp.inner.for.body:
17656 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !92
17657 // CHECK10-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
17658 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !92
17659 // CHECK10-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
17660 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i32**, i32**, i32**)* @.omp_outlined..47 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], i32** [[TMP1]], i32** [[TMP2]], i32** [[TMP3]]), !llvm.access.group !92
17661 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
17662 // CHECK10:       omp.inner.for.inc:
17663 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !92
17664 // CHECK10-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !92
17665 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
17666 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !92
17667 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP93:![0-9]+]]
17668 // CHECK10:       omp.inner.for.end:
17669 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
17670 // CHECK10:       omp.loop.exit:
17671 // CHECK10-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
17672 // CHECK10-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
17673 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
17674 // CHECK10-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
17675 // CHECK10-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
17676 // CHECK10-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
17677 // CHECK10:       .omp.final.then:
17678 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17679 // CHECK10-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
17680 // CHECK10-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
17681 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
17682 // CHECK10-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
17683 // CHECK10-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
17684 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
17685 // CHECK10:       .omp.final.done:
17686 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
17687 // CHECK10:       omp.precond.end:
17688 // CHECK10-NEXT:    ret void
17689 //
17690 //
17691 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..47
17692 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
17693 // CHECK10-NEXT:  entry:
17694 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
17695 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
17696 // CHECK10-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
17697 // CHECK10-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
17698 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
17699 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
17700 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
17701 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
17702 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
17703 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
17704 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
17705 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
17706 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
17707 // CHECK10-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
17708 // CHECK10-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
17709 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
17710 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
17711 // CHECK10-NEXT:    [[I4:%.*]] = alloca i32, align 4
17712 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
17713 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
17714 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
17715 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
17716 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
17717 // CHECK10-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
17718 // CHECK10-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
17719 // CHECK10-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
17720 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
17721 // CHECK10-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
17722 // CHECK10-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
17723 // CHECK10-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
17724 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
17725 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
17726 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17727 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
17728 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
17729 // CHECK10-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
17730 // CHECK10-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
17731 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
17732 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17733 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
17734 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
17735 // CHECK10:       omp.precond.then:
17736 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
17737 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17738 // CHECK10-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
17739 // CHECK10-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
17740 // CHECK10-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
17741 // CHECK10-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
17742 // CHECK10-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
17743 // CHECK10-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
17744 // CHECK10-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
17745 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
17746 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
17747 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
17748 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
17749 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
17750 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
17751 // CHECK10-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32 35, i32 [[TMP10]], i32 [[TMP11]], i32 1, i32 1)
17752 // CHECK10-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
17753 // CHECK10:       omp.dispatch.cond:
17754 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
17755 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4
17756 // CHECK10-NEXT:    [[TMP16:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP15]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
17757 // CHECK10-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP16]], 0
17758 // CHECK10-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
17759 // CHECK10:       omp.dispatch.body:
17760 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
17761 // CHECK10-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
17762 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
17763 // CHECK10:       omp.inner.for.cond:
17764 // CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !95
17765 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !95
17766 // CHECK10-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
17767 // CHECK10-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
17768 // CHECK10:       omp.inner.for.body:
17769 // CHECK10-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !95
17770 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP20]], 1
17771 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
17772 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !95
17773 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[TMP2]], align 8, !llvm.access.group !95
17774 // CHECK10-NEXT:    [[TMP22:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !95
17775 // CHECK10-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP22]] to i64
17776 // CHECK10-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP21]], i64 [[IDXPROM]]
17777 // CHECK10-NEXT:    [[TMP23:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !95
17778 // CHECK10-NEXT:    [[TMP24:%.*]] = load i32*, i32** [[TMP3]], align 8, !llvm.access.group !95
17779 // CHECK10-NEXT:    [[TMP25:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !95
17780 // CHECK10-NEXT:    [[IDXPROM6:%.*]] = sext i32 [[TMP25]] to i64
17781 // CHECK10-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i32, i32* [[TMP24]], i64 [[IDXPROM6]]
17782 // CHECK10-NEXT:    [[TMP26:%.*]] = load i32, i32* [[ARRAYIDX7]], align 4, !llvm.access.group !95
17783 // CHECK10-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP23]], [[TMP26]]
17784 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[TMP1]], align 8, !llvm.access.group !95
17785 // CHECK10-NEXT:    [[TMP28:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !95
17786 // CHECK10-NEXT:    [[IDXPROM9:%.*]] = sext i32 [[TMP28]] to i64
17787 // CHECK10-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds i32, i32* [[TMP27]], i64 [[IDXPROM9]]
17788 // CHECK10-NEXT:    store i32 [[ADD8]], i32* [[ARRAYIDX10]], align 4, !llvm.access.group !95
17789 // CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
17790 // CHECK10:       omp.body.continue:
17791 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
17792 // CHECK10:       omp.inner.for.inc:
17793 // CHECK10-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !95
17794 // CHECK10-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP29]], 1
17795 // CHECK10-NEXT:    store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !95
17796 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP96:![0-9]+]]
17797 // CHECK10:       omp.inner.for.end:
17798 // CHECK10-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
17799 // CHECK10:       omp.dispatch.inc:
17800 // CHECK10-NEXT:    br label [[OMP_DISPATCH_COND]]
17801 // CHECK10:       omp.dispatch.end:
17802 // CHECK10-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
17803 // CHECK10-NEXT:    [[TMP31:%.*]] = icmp ne i32 [[TMP30]], 0
17804 // CHECK10-NEXT:    br i1 [[TMP31]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
17805 // CHECK10:       .omp.final.then:
17806 // CHECK10-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17807 // CHECK10-NEXT:    [[SUB12:%.*]] = sub nsw i32 [[TMP32]], 0
17808 // CHECK10-NEXT:    [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1
17809 // CHECK10-NEXT:    [[MUL14:%.*]] = mul nsw i32 [[DIV13]], 1
17810 // CHECK10-NEXT:    [[ADD15:%.*]] = add nsw i32 0, [[MUL14]]
17811 // CHECK10-NEXT:    store i32 [[ADD15]], i32* [[I4]], align 4
17812 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
17813 // CHECK10:       .omp.final.done:
17814 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
17815 // CHECK10:       omp.precond.end:
17816 // CHECK10-NEXT:    ret void
17817 //
17818 //
17819 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l90
17820 // CHECK10-SAME: (i64 [[CH:%.*]], i64 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
17821 // CHECK10-NEXT:  entry:
17822 // CHECK10-NEXT:    [[CH_ADDR:%.*]] = alloca i64, align 8
17823 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
17824 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
17825 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 8
17826 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 8
17827 // CHECK10-NEXT:    store i64 [[CH]], i64* [[CH_ADDR]], align 8
17828 // CHECK10-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
17829 // CHECK10-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
17830 // CHECK10-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 8
17831 // CHECK10-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 8
17832 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
17833 // CHECK10-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
17834 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..50 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
17835 // CHECK10-NEXT:    ret void
17836 //
17837 //
17838 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..50
17839 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
17840 // CHECK10-NEXT:  entry:
17841 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
17842 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
17843 // CHECK10-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 8
17844 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
17845 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
17846 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
17847 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
17848 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
17849 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
17850 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
17851 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
17852 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
17853 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
17854 // CHECK10-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
17855 // CHECK10-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
17856 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
17857 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
17858 // CHECK10-NEXT:    [[I4:%.*]] = alloca i32, align 4
17859 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
17860 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
17861 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
17862 // CHECK10-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 8
17863 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
17864 // CHECK10-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
17865 // CHECK10-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
17866 // CHECK10-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
17867 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
17868 // CHECK10-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
17869 // CHECK10-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
17870 // CHECK10-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
17871 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
17872 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
17873 // CHECK10-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
17874 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
17875 // CHECK10-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
17876 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17877 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
17878 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
17879 // CHECK10-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
17880 // CHECK10-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
17881 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
17882 // CHECK10-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17883 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
17884 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
17885 // CHECK10:       omp.precond.then:
17886 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
17887 // CHECK10-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
17888 // CHECK10-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
17889 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
17890 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
17891 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
17892 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
17893 // CHECK10-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
17894 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
17895 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
17896 // CHECK10-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
17897 // CHECK10-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
17898 // CHECK10:       cond.true:
17899 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
17900 // CHECK10-NEXT:    br label [[COND_END:%.*]]
17901 // CHECK10:       cond.false:
17902 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
17903 // CHECK10-NEXT:    br label [[COND_END]]
17904 // CHECK10:       cond.end:
17905 // CHECK10-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
17906 // CHECK10-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
17907 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
17908 // CHECK10-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
17909 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
17910 // CHECK10:       omp.inner.for.cond:
17911 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !98
17912 // CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !98
17913 // CHECK10-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
17914 // CHECK10-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
17915 // CHECK10:       omp.inner.for.body:
17916 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !98
17917 // CHECK10-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
17918 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !98
17919 // CHECK10-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
17920 // CHECK10-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !98
17921 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
17922 // CHECK10-NEXT:    store i32 [[TMP23]], i32* [[CONV]], align 4, !llvm.access.group !98
17923 // CHECK10-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !98
17924 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i32**, i32**, i32**, i64)* @.omp_outlined..51 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], i32** [[TMP2]], i32** [[TMP3]], i32** [[TMP4]], i64 [[TMP24]]), !llvm.access.group !98
17925 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
17926 // CHECK10:       omp.inner.for.inc:
17927 // CHECK10-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !98
17928 // CHECK10-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !98
17929 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
17930 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !98
17931 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP99:![0-9]+]]
17932 // CHECK10:       omp.inner.for.end:
17933 // CHECK10-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
17934 // CHECK10:       omp.loop.exit:
17935 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
17936 // CHECK10-NEXT:    [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
17937 // CHECK10-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP28]])
17938 // CHECK10-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
17939 // CHECK10-NEXT:    [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
17940 // CHECK10-NEXT:    br i1 [[TMP30]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
17941 // CHECK10:       .omp.final.then:
17942 // CHECK10-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17943 // CHECK10-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP31]], 0
17944 // CHECK10-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
17945 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
17946 // CHECK10-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
17947 // CHECK10-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
17948 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
17949 // CHECK10:       .omp.final.done:
17950 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
17951 // CHECK10:       omp.precond.end:
17952 // CHECK10-NEXT:    ret void
17953 //
17954 //
17955 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..51
17956 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i32** nonnull align 8 dereferenceable(8) [[C:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
17957 // CHECK10-NEXT:  entry:
17958 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
17959 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
17960 // CHECK10-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
17961 // CHECK10-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
17962 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
17963 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
17964 // CHECK10-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
17965 // CHECK10-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
17966 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
17967 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
17968 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
17969 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
17970 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
17971 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
17972 // CHECK10-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
17973 // CHECK10-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
17974 // CHECK10-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
17975 // CHECK10-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
17976 // CHECK10-NEXT:    [[I6:%.*]] = alloca i32, align 4
17977 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
17978 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
17979 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
17980 // CHECK10-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
17981 // CHECK10-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
17982 // CHECK10-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
17983 // CHECK10-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
17984 // CHECK10-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
17985 // CHECK10-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
17986 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
17987 // CHECK10-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
17988 // CHECK10-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
17989 // CHECK10-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
17990 // CHECK10-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
17991 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
17992 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
17993 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17994 // CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
17995 // CHECK10-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
17996 // CHECK10-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
17997 // CHECK10-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
17998 // CHECK10-NEXT:    store i32 0, i32* [[I]], align 4
17999 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
18000 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
18001 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
18002 // CHECK10:       omp.precond.then:
18003 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
18004 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
18005 // CHECK10-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
18006 // CHECK10-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
18007 // CHECK10-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
18008 // CHECK10-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
18009 // CHECK10-NEXT:    [[CONV5:%.*]] = trunc i64 [[TMP9]] to i32
18010 // CHECK10-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_LB]], align 4
18011 // CHECK10-NEXT:    store i32 [[CONV5]], i32* [[DOTOMP_UB]], align 4
18012 // CHECK10-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
18013 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
18014 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32, i32* [[CONV]], align 8
18015 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
18016 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
18017 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
18018 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
18019 // CHECK10-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32 35, i32 [[TMP11]], i32 [[TMP12]], i32 1, i32 [[TMP10]])
18020 // CHECK10-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
18021 // CHECK10:       omp.dispatch.cond:
18022 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
18023 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4
18024 // CHECK10-NEXT:    [[TMP17:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP16]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
18025 // CHECK10-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP17]], 0
18026 // CHECK10-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
18027 // CHECK10:       omp.dispatch.body:
18028 // CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
18029 // CHECK10-NEXT:    store i32 [[TMP18]], i32* [[DOTOMP_IV]], align 4
18030 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
18031 // CHECK10:       omp.inner.for.cond:
18032 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !101
18033 // CHECK10-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !101
18034 // CHECK10-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
18035 // CHECK10-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
18036 // CHECK10:       omp.inner.for.body:
18037 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !101
18038 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP21]], 1
18039 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
18040 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[I6]], align 4, !llvm.access.group !101
18041 // CHECK10-NEXT:    [[TMP22:%.*]] = load i32*, i32** [[TMP2]], align 8, !llvm.access.group !101
18042 // CHECK10-NEXT:    [[TMP23:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !101
18043 // CHECK10-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP23]] to i64
18044 // CHECK10-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP22]], i64 [[IDXPROM]]
18045 // CHECK10-NEXT:    [[TMP24:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !101
18046 // CHECK10-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[TMP3]], align 8, !llvm.access.group !101
18047 // CHECK10-NEXT:    [[TMP26:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !101
18048 // CHECK10-NEXT:    [[IDXPROM8:%.*]] = sext i32 [[TMP26]] to i64
18049 // CHECK10-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, i32* [[TMP25]], i64 [[IDXPROM8]]
18050 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32, i32* [[ARRAYIDX9]], align 4, !llvm.access.group !101
18051 // CHECK10-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP24]], [[TMP27]]
18052 // CHECK10-NEXT:    [[TMP28:%.*]] = load i32*, i32** [[TMP1]], align 8, !llvm.access.group !101
18053 // CHECK10-NEXT:    [[TMP29:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !101
18054 // CHECK10-NEXT:    [[IDXPROM11:%.*]] = sext i32 [[TMP29]] to i64
18055 // CHECK10-NEXT:    [[ARRAYIDX12:%.*]] = getelementptr inbounds i32, i32* [[TMP28]], i64 [[IDXPROM11]]
18056 // CHECK10-NEXT:    store i32 [[ADD10]], i32* [[ARRAYIDX12]], align 4, !llvm.access.group !101
18057 // CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
18058 // CHECK10:       omp.body.continue:
18059 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
18060 // CHECK10:       omp.inner.for.inc:
18061 // CHECK10-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !101
18062 // CHECK10-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP30]], 1
18063 // CHECK10-NEXT:    store i32 [[ADD13]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !101
18064 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP102:![0-9]+]]
18065 // CHECK10:       omp.inner.for.end:
18066 // CHECK10-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
18067 // CHECK10:       omp.dispatch.inc:
18068 // CHECK10-NEXT:    br label [[OMP_DISPATCH_COND]]
18069 // CHECK10:       omp.dispatch.end:
18070 // CHECK10-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
18071 // CHECK10-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
18072 // CHECK10-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
18073 // CHECK10:       .omp.final.then:
18074 // CHECK10-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
18075 // CHECK10-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP33]], 0
18076 // CHECK10-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
18077 // CHECK10-NEXT:    [[MUL16:%.*]] = mul nsw i32 [[DIV15]], 1
18078 // CHECK10-NEXT:    [[ADD17:%.*]] = add nsw i32 0, [[MUL16]]
18079 // CHECK10-NEXT:    store i32 [[ADD17]], i32* [[I6]], align 4
18080 // CHECK10-NEXT:    br label [[DOTOMP_FINAL_DONE]]
18081 // CHECK10:       .omp.final.done:
18082 // CHECK10-NEXT:    br label [[OMP_PRECOND_END]]
18083 // CHECK10:       omp.precond.end:
18084 // CHECK10-NEXT:    ret void
18085 //
18086 //
18087 // CHECK10-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
18088 // CHECK10-SAME: () #[[ATTR4:[0-9]+]] {
18089 // CHECK10-NEXT:  entry:
18090 // CHECK10-NEXT:    call void @__tgt_register_requires(i64 1)
18091 // CHECK10-NEXT:    ret void
18092 //
18093 //
18094 // CHECK11-LABEL: define {{[^@]+}}@main
18095 // CHECK11-SAME: () #[[ATTR0:[0-9]+]] {
18096 // CHECK11-NEXT:  entry:
18097 // CHECK11-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
18098 // CHECK11-NEXT:    [[A:%.*]] = alloca double*, align 4
18099 // CHECK11-NEXT:    [[B:%.*]] = alloca double*, align 4
18100 // CHECK11-NEXT:    [[C:%.*]] = alloca double*, align 4
18101 // CHECK11-NEXT:    [[N:%.*]] = alloca i32, align 4
18102 // CHECK11-NEXT:    [[CH:%.*]] = alloca i32, align 4
18103 // CHECK11-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
18104 // CHECK11-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 4
18105 // CHECK11-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 4
18106 // CHECK11-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 4
18107 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
18108 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
18109 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
18110 // CHECK11-NEXT:    [[N_CASTED3:%.*]] = alloca i32, align 4
18111 // CHECK11-NEXT:    [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [4 x i8*], align 4
18112 // CHECK11-NEXT:    [[DOTOFFLOAD_PTRS5:%.*]] = alloca [4 x i8*], align 4
18113 // CHECK11-NEXT:    [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [4 x i8*], align 4
18114 // CHECK11-NEXT:    [[_TMP7:%.*]] = alloca i32, align 4
18115 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_8:%.*]] = alloca i32, align 4
18116 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4
18117 // CHECK11-NEXT:    [[CH_CASTED:%.*]] = alloca i32, align 4
18118 // CHECK11-NEXT:    [[N_CASTED16:%.*]] = alloca i32, align 4
18119 // CHECK11-NEXT:    [[DOTOFFLOAD_BASEPTRS17:%.*]] = alloca [5 x i8*], align 4
18120 // CHECK11-NEXT:    [[DOTOFFLOAD_PTRS18:%.*]] = alloca [5 x i8*], align 4
18121 // CHECK11-NEXT:    [[DOTOFFLOAD_MAPPERS19:%.*]] = alloca [5 x i8*], align 4
18122 // CHECK11-NEXT:    [[_TMP20:%.*]] = alloca i32, align 4
18123 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_21:%.*]] = alloca i32, align 4
18124 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_22:%.*]] = alloca i32, align 4
18125 // CHECK11-NEXT:    [[N_CASTED29:%.*]] = alloca i32, align 4
18126 // CHECK11-NEXT:    [[DOTOFFLOAD_BASEPTRS30:%.*]] = alloca [4 x i8*], align 4
18127 // CHECK11-NEXT:    [[DOTOFFLOAD_PTRS31:%.*]] = alloca [4 x i8*], align 4
18128 // CHECK11-NEXT:    [[DOTOFFLOAD_MAPPERS32:%.*]] = alloca [4 x i8*], align 4
18129 // CHECK11-NEXT:    [[_TMP33:%.*]] = alloca i32, align 4
18130 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_34:%.*]] = alloca i32, align 4
18131 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_35:%.*]] = alloca i32, align 4
18132 // CHECK11-NEXT:    [[CH_CASTED42:%.*]] = alloca i32, align 4
18133 // CHECK11-NEXT:    [[N_CASTED43:%.*]] = alloca i32, align 4
18134 // CHECK11-NEXT:    [[DOTOFFLOAD_BASEPTRS44:%.*]] = alloca [5 x i8*], align 4
18135 // CHECK11-NEXT:    [[DOTOFFLOAD_PTRS45:%.*]] = alloca [5 x i8*], align 4
18136 // CHECK11-NEXT:    [[DOTOFFLOAD_MAPPERS46:%.*]] = alloca [5 x i8*], align 4
18137 // CHECK11-NEXT:    [[_TMP47:%.*]] = alloca i32, align 4
18138 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_48:%.*]] = alloca i32, align 4
18139 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_49:%.*]] = alloca i32, align 4
18140 // CHECK11-NEXT:    [[N_CASTED56:%.*]] = alloca i32, align 4
18141 // CHECK11-NEXT:    [[DOTOFFLOAD_BASEPTRS57:%.*]] = alloca [4 x i8*], align 4
18142 // CHECK11-NEXT:    [[DOTOFFLOAD_PTRS58:%.*]] = alloca [4 x i8*], align 4
18143 // CHECK11-NEXT:    [[DOTOFFLOAD_MAPPERS59:%.*]] = alloca [4 x i8*], align 4
18144 // CHECK11-NEXT:    [[_TMP60:%.*]] = alloca i32, align 4
18145 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_61:%.*]] = alloca i32, align 4
18146 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_62:%.*]] = alloca i32, align 4
18147 // CHECK11-NEXT:    [[CH_CASTED69:%.*]] = alloca i32, align 4
18148 // CHECK11-NEXT:    [[N_CASTED70:%.*]] = alloca i32, align 4
18149 // CHECK11-NEXT:    [[DOTOFFLOAD_BASEPTRS71:%.*]] = alloca [5 x i8*], align 4
18150 // CHECK11-NEXT:    [[DOTOFFLOAD_PTRS72:%.*]] = alloca [5 x i8*], align 4
18151 // CHECK11-NEXT:    [[DOTOFFLOAD_MAPPERS73:%.*]] = alloca [5 x i8*], align 4
18152 // CHECK11-NEXT:    [[_TMP74:%.*]] = alloca i32, align 4
18153 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_75:%.*]] = alloca i32, align 4
18154 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_76:%.*]] = alloca i32, align 4
18155 // CHECK11-NEXT:    store i32 0, i32* [[RETVAL]], align 4
18156 // CHECK11-NEXT:    store i32 10000, i32* [[N]], align 4
18157 // CHECK11-NEXT:    store i32 100, i32* [[CH]], align 4
18158 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
18159 // CHECK11-NEXT:    store i32 [[TMP0]], i32* [[N_CASTED]], align 4
18160 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_CASTED]], align 4
18161 // CHECK11-NEXT:    [[TMP2:%.*]] = load double*, double** [[A]], align 4
18162 // CHECK11-NEXT:    [[TMP3:%.*]] = load double*, double** [[B]], align 4
18163 // CHECK11-NEXT:    [[TMP4:%.*]] = load double*, double** [[C]], align 4
18164 // CHECK11-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
18165 // CHECK11-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32*
18166 // CHECK11-NEXT:    store i32 [[TMP1]], i32* [[TMP6]], align 4
18167 // CHECK11-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
18168 // CHECK11-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32*
18169 // CHECK11-NEXT:    store i32 [[TMP1]], i32* [[TMP8]], align 4
18170 // CHECK11-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
18171 // CHECK11-NEXT:    store i8* null, i8** [[TMP9]], align 4
18172 // CHECK11-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
18173 // CHECK11-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to double**
18174 // CHECK11-NEXT:    store double* [[TMP2]], double** [[TMP11]], align 4
18175 // CHECK11-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
18176 // CHECK11-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double**
18177 // CHECK11-NEXT:    store double* [[TMP2]], double** [[TMP13]], align 4
18178 // CHECK11-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
18179 // CHECK11-NEXT:    store i8* null, i8** [[TMP14]], align 4
18180 // CHECK11-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
18181 // CHECK11-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to double**
18182 // CHECK11-NEXT:    store double* [[TMP3]], double** [[TMP16]], align 4
18183 // CHECK11-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
18184 // CHECK11-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to double**
18185 // CHECK11-NEXT:    store double* [[TMP3]], double** [[TMP18]], align 4
18186 // CHECK11-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
18187 // CHECK11-NEXT:    store i8* null, i8** [[TMP19]], align 4
18188 // CHECK11-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
18189 // CHECK11-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to double**
18190 // CHECK11-NEXT:    store double* [[TMP4]], double** [[TMP21]], align 4
18191 // CHECK11-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
18192 // CHECK11-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to double**
18193 // CHECK11-NEXT:    store double* [[TMP4]], double** [[TMP23]], align 4
18194 // CHECK11-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
18195 // CHECK11-NEXT:    store i8* null, i8** [[TMP24]], align 4
18196 // CHECK11-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
18197 // CHECK11-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
18198 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[N]], align 4
18199 // CHECK11-NEXT:    store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4
18200 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
18201 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0
18202 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
18203 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
18204 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
18205 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
18206 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP29]], 1
18207 // CHECK11-NEXT:    [[TMP30:%.*]] = zext i32 [[ADD]] to i64
18208 // CHECK11-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]])
18209 // CHECK11-NEXT:    [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l368.region_id, i32 4, i8** [[TMP25]], i8** [[TMP26]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
18210 // CHECK11-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
18211 // CHECK11-NEXT:    br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
18212 // CHECK11:       omp_offload.failed:
18213 // CHECK11-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l368(i32 [[TMP1]], double* [[TMP2]], double* [[TMP3]], double* [[TMP4]]) #[[ATTR2:[0-9]+]]
18214 // CHECK11-NEXT:    br label [[OMP_OFFLOAD_CONT]]
18215 // CHECK11:       omp_offload.cont:
18216 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[N]], align 4
18217 // CHECK11-NEXT:    store i32 [[TMP33]], i32* [[N_CASTED3]], align 4
18218 // CHECK11-NEXT:    [[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4
18219 // CHECK11-NEXT:    [[TMP35:%.*]] = load double*, double** [[A]], align 4
18220 // CHECK11-NEXT:    [[TMP36:%.*]] = load double*, double** [[B]], align 4
18221 // CHECK11-NEXT:    [[TMP37:%.*]] = load double*, double** [[C]], align 4
18222 // CHECK11-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
18223 // CHECK11-NEXT:    [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32*
18224 // CHECK11-NEXT:    store i32 [[TMP34]], i32* [[TMP39]], align 4
18225 // CHECK11-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
18226 // CHECK11-NEXT:    [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32*
18227 // CHECK11-NEXT:    store i32 [[TMP34]], i32* [[TMP41]], align 4
18228 // CHECK11-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0
18229 // CHECK11-NEXT:    store i8* null, i8** [[TMP42]], align 4
18230 // CHECK11-NEXT:    [[TMP43:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1
18231 // CHECK11-NEXT:    [[TMP44:%.*]] = bitcast i8** [[TMP43]] to double**
18232 // CHECK11-NEXT:    store double* [[TMP35]], double** [[TMP44]], align 4
18233 // CHECK11-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1
18234 // CHECK11-NEXT:    [[TMP46:%.*]] = bitcast i8** [[TMP45]] to double**
18235 // CHECK11-NEXT:    store double* [[TMP35]], double** [[TMP46]], align 4
18236 // CHECK11-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1
18237 // CHECK11-NEXT:    store i8* null, i8** [[TMP47]], align 4
18238 // CHECK11-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2
18239 // CHECK11-NEXT:    [[TMP49:%.*]] = bitcast i8** [[TMP48]] to double**
18240 // CHECK11-NEXT:    store double* [[TMP36]], double** [[TMP49]], align 4
18241 // CHECK11-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2
18242 // CHECK11-NEXT:    [[TMP51:%.*]] = bitcast i8** [[TMP50]] to double**
18243 // CHECK11-NEXT:    store double* [[TMP36]], double** [[TMP51]], align 4
18244 // CHECK11-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2
18245 // CHECK11-NEXT:    store i8* null, i8** [[TMP52]], align 4
18246 // CHECK11-NEXT:    [[TMP53:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 3
18247 // CHECK11-NEXT:    [[TMP54:%.*]] = bitcast i8** [[TMP53]] to double**
18248 // CHECK11-NEXT:    store double* [[TMP37]], double** [[TMP54]], align 4
18249 // CHECK11-NEXT:    [[TMP55:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 3
18250 // CHECK11-NEXT:    [[TMP56:%.*]] = bitcast i8** [[TMP55]] to double**
18251 // CHECK11-NEXT:    store double* [[TMP37]], double** [[TMP56]], align 4
18252 // CHECK11-NEXT:    [[TMP57:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 3
18253 // CHECK11-NEXT:    store i8* null, i8** [[TMP57]], align 4
18254 // CHECK11-NEXT:    [[TMP58:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
18255 // CHECK11-NEXT:    [[TMP59:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
18256 // CHECK11-NEXT:    [[TMP60:%.*]] = load i32, i32* [[N]], align 4
18257 // CHECK11-NEXT:    store i32 [[TMP60]], i32* [[DOTCAPTURE_EXPR_8]], align 4
18258 // CHECK11-NEXT:    [[TMP61:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_8]], align 4
18259 // CHECK11-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP61]], 0
18260 // CHECK11-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
18261 // CHECK11-NEXT:    [[SUB12:%.*]] = sub nsw i32 [[DIV11]], 1
18262 // CHECK11-NEXT:    store i32 [[SUB12]], i32* [[DOTCAPTURE_EXPR_9]], align 4
18263 // CHECK11-NEXT:    [[TMP62:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4
18264 // CHECK11-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP62]], 1
18265 // CHECK11-NEXT:    [[TMP63:%.*]] = zext i32 [[ADD13]] to i64
18266 // CHECK11-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP63]])
18267 // CHECK11-NEXT:    [[TMP64:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l407.region_id, i32 4, i8** [[TMP58]], i8** [[TMP59]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
18268 // CHECK11-NEXT:    [[TMP65:%.*]] = icmp ne i32 [[TMP64]], 0
18269 // CHECK11-NEXT:    br i1 [[TMP65]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]]
18270 // CHECK11:       omp_offload.failed14:
18271 // CHECK11-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l407(i32 [[TMP34]], double* [[TMP35]], double* [[TMP36]], double* [[TMP37]]) #[[ATTR2]]
18272 // CHECK11-NEXT:    br label [[OMP_OFFLOAD_CONT15]]
18273 // CHECK11:       omp_offload.cont15:
18274 // CHECK11-NEXT:    [[TMP66:%.*]] = load i32, i32* [[CH]], align 4
18275 // CHECK11-NEXT:    store i32 [[TMP66]], i32* [[CH_CASTED]], align 4
18276 // CHECK11-NEXT:    [[TMP67:%.*]] = load i32, i32* [[CH_CASTED]], align 4
18277 // CHECK11-NEXT:    [[TMP68:%.*]] = load i32, i32* [[N]], align 4
18278 // CHECK11-NEXT:    store i32 [[TMP68]], i32* [[N_CASTED16]], align 4
18279 // CHECK11-NEXT:    [[TMP69:%.*]] = load i32, i32* [[N_CASTED16]], align 4
18280 // CHECK11-NEXT:    [[TMP70:%.*]] = load double*, double** [[A]], align 4
18281 // CHECK11-NEXT:    [[TMP71:%.*]] = load double*, double** [[B]], align 4
18282 // CHECK11-NEXT:    [[TMP72:%.*]] = load double*, double** [[C]], align 4
18283 // CHECK11-NEXT:    [[TMP73:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
18284 // CHECK11-NEXT:    [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32*
18285 // CHECK11-NEXT:    store i32 [[TMP67]], i32* [[TMP74]], align 4
18286 // CHECK11-NEXT:    [[TMP75:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
18287 // CHECK11-NEXT:    [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i32*
18288 // CHECK11-NEXT:    store i32 [[TMP67]], i32* [[TMP76]], align 4
18289 // CHECK11-NEXT:    [[TMP77:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 0
18290 // CHECK11-NEXT:    store i8* null, i8** [[TMP77]], align 4
18291 // CHECK11-NEXT:    [[TMP78:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 1
18292 // CHECK11-NEXT:    [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32*
18293 // CHECK11-NEXT:    store i32 [[TMP69]], i32* [[TMP79]], align 4
18294 // CHECK11-NEXT:    [[TMP80:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 1
18295 // CHECK11-NEXT:    [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i32*
18296 // CHECK11-NEXT:    store i32 [[TMP69]], i32* [[TMP81]], align 4
18297 // CHECK11-NEXT:    [[TMP82:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 1
18298 // CHECK11-NEXT:    store i8* null, i8** [[TMP82]], align 4
18299 // CHECK11-NEXT:    [[TMP83:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 2
18300 // CHECK11-NEXT:    [[TMP84:%.*]] = bitcast i8** [[TMP83]] to double**
18301 // CHECK11-NEXT:    store double* [[TMP70]], double** [[TMP84]], align 4
18302 // CHECK11-NEXT:    [[TMP85:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 2
18303 // CHECK11-NEXT:    [[TMP86:%.*]] = bitcast i8** [[TMP85]] to double**
18304 // CHECK11-NEXT:    store double* [[TMP70]], double** [[TMP86]], align 4
18305 // CHECK11-NEXT:    [[TMP87:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 2
18306 // CHECK11-NEXT:    store i8* null, i8** [[TMP87]], align 4
18307 // CHECK11-NEXT:    [[TMP88:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 3
18308 // CHECK11-NEXT:    [[TMP89:%.*]] = bitcast i8** [[TMP88]] to double**
18309 // CHECK11-NEXT:    store double* [[TMP71]], double** [[TMP89]], align 4
18310 // CHECK11-NEXT:    [[TMP90:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 3
18311 // CHECK11-NEXT:    [[TMP91:%.*]] = bitcast i8** [[TMP90]] to double**
18312 // CHECK11-NEXT:    store double* [[TMP71]], double** [[TMP91]], align 4
18313 // CHECK11-NEXT:    [[TMP92:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 3
18314 // CHECK11-NEXT:    store i8* null, i8** [[TMP92]], align 4
18315 // CHECK11-NEXT:    [[TMP93:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 4
18316 // CHECK11-NEXT:    [[TMP94:%.*]] = bitcast i8** [[TMP93]] to double**
18317 // CHECK11-NEXT:    store double* [[TMP72]], double** [[TMP94]], align 4
18318 // CHECK11-NEXT:    [[TMP95:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 4
18319 // CHECK11-NEXT:    [[TMP96:%.*]] = bitcast i8** [[TMP95]] to double**
18320 // CHECK11-NEXT:    store double* [[TMP72]], double** [[TMP96]], align 4
18321 // CHECK11-NEXT:    [[TMP97:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 4
18322 // CHECK11-NEXT:    store i8* null, i8** [[TMP97]], align 4
18323 // CHECK11-NEXT:    [[TMP98:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
18324 // CHECK11-NEXT:    [[TMP99:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
18325 // CHECK11-NEXT:    [[TMP100:%.*]] = load i32, i32* [[N]], align 4
18326 // CHECK11-NEXT:    store i32 [[TMP100]], i32* [[DOTCAPTURE_EXPR_21]], align 4
18327 // CHECK11-NEXT:    [[TMP101:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_21]], align 4
18328 // CHECK11-NEXT:    [[SUB23:%.*]] = sub nsw i32 [[TMP101]], 0
18329 // CHECK11-NEXT:    [[DIV24:%.*]] = sdiv i32 [[SUB23]], 1
18330 // CHECK11-NEXT:    [[SUB25:%.*]] = sub nsw i32 [[DIV24]], 1
18331 // CHECK11-NEXT:    store i32 [[SUB25]], i32* [[DOTCAPTURE_EXPR_22]], align 4
18332 // CHECK11-NEXT:    [[TMP102:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_22]], align 4
18333 // CHECK11-NEXT:    [[ADD26:%.*]] = add nsw i32 [[TMP102]], 1
18334 // CHECK11-NEXT:    [[TMP103:%.*]] = zext i32 [[ADD26]] to i64
18335 // CHECK11-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP103]])
18336 // CHECK11-NEXT:    [[TMP104:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l446.region_id, i32 5, i8** [[TMP98]], i8** [[TMP99]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
18337 // CHECK11-NEXT:    [[TMP105:%.*]] = icmp ne i32 [[TMP104]], 0
18338 // CHECK11-NEXT:    br i1 [[TMP105]], label [[OMP_OFFLOAD_FAILED27:%.*]], label [[OMP_OFFLOAD_CONT28:%.*]]
18339 // CHECK11:       omp_offload.failed27:
18340 // CHECK11-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l446(i32 [[TMP67]], i32 [[TMP69]], double* [[TMP70]], double* [[TMP71]], double* [[TMP72]]) #[[ATTR2]]
18341 // CHECK11-NEXT:    br label [[OMP_OFFLOAD_CONT28]]
18342 // CHECK11:       omp_offload.cont28:
18343 // CHECK11-NEXT:    [[TMP106:%.*]] = load i32, i32* [[N]], align 4
18344 // CHECK11-NEXT:    store i32 [[TMP106]], i32* [[N_CASTED29]], align 4
18345 // CHECK11-NEXT:    [[TMP107:%.*]] = load i32, i32* [[N_CASTED29]], align 4
18346 // CHECK11-NEXT:    [[TMP108:%.*]] = load double*, double** [[A]], align 4
18347 // CHECK11-NEXT:    [[TMP109:%.*]] = load double*, double** [[B]], align 4
18348 // CHECK11-NEXT:    [[TMP110:%.*]] = load double*, double** [[C]], align 4
18349 // CHECK11-NEXT:    [[TMP111:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 0
18350 // CHECK11-NEXT:    [[TMP112:%.*]] = bitcast i8** [[TMP111]] to i32*
18351 // CHECK11-NEXT:    store i32 [[TMP107]], i32* [[TMP112]], align 4
18352 // CHECK11-NEXT:    [[TMP113:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 0
18353 // CHECK11-NEXT:    [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i32*
18354 // CHECK11-NEXT:    store i32 [[TMP107]], i32* [[TMP114]], align 4
18355 // CHECK11-NEXT:    [[TMP115:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 0
18356 // CHECK11-NEXT:    store i8* null, i8** [[TMP115]], align 4
18357 // CHECK11-NEXT:    [[TMP116:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 1
18358 // CHECK11-NEXT:    [[TMP117:%.*]] = bitcast i8** [[TMP116]] to double**
18359 // CHECK11-NEXT:    store double* [[TMP108]], double** [[TMP117]], align 4
18360 // CHECK11-NEXT:    [[TMP118:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 1
18361 // CHECK11-NEXT:    [[TMP119:%.*]] = bitcast i8** [[TMP118]] to double**
18362 // CHECK11-NEXT:    store double* [[TMP108]], double** [[TMP119]], align 4
18363 // CHECK11-NEXT:    [[TMP120:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 1
18364 // CHECK11-NEXT:    store i8* null, i8** [[TMP120]], align 4
18365 // CHECK11-NEXT:    [[TMP121:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 2
18366 // CHECK11-NEXT:    [[TMP122:%.*]] = bitcast i8** [[TMP121]] to double**
18367 // CHECK11-NEXT:    store double* [[TMP109]], double** [[TMP122]], align 4
18368 // CHECK11-NEXT:    [[TMP123:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 2
18369 // CHECK11-NEXT:    [[TMP124:%.*]] = bitcast i8** [[TMP123]] to double**
18370 // CHECK11-NEXT:    store double* [[TMP109]], double** [[TMP124]], align 4
18371 // CHECK11-NEXT:    [[TMP125:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 2
18372 // CHECK11-NEXT:    store i8* null, i8** [[TMP125]], align 4
18373 // CHECK11-NEXT:    [[TMP126:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 3
18374 // CHECK11-NEXT:    [[TMP127:%.*]] = bitcast i8** [[TMP126]] to double**
18375 // CHECK11-NEXT:    store double* [[TMP110]], double** [[TMP127]], align 4
18376 // CHECK11-NEXT:    [[TMP128:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 3
18377 // CHECK11-NEXT:    [[TMP129:%.*]] = bitcast i8** [[TMP128]] to double**
18378 // CHECK11-NEXT:    store double* [[TMP110]], double** [[TMP129]], align 4
18379 // CHECK11-NEXT:    [[TMP130:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 3
18380 // CHECK11-NEXT:    store i8* null, i8** [[TMP130]], align 4
18381 // CHECK11-NEXT:    [[TMP131:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 0
18382 // CHECK11-NEXT:    [[TMP132:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 0
18383 // CHECK11-NEXT:    [[TMP133:%.*]] = load i32, i32* [[N]], align 4
18384 // CHECK11-NEXT:    store i32 [[TMP133]], i32* [[DOTCAPTURE_EXPR_34]], align 4
18385 // CHECK11-NEXT:    [[TMP134:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_34]], align 4
18386 // CHECK11-NEXT:    [[SUB36:%.*]] = sub nsw i32 [[TMP134]], 0
18387 // CHECK11-NEXT:    [[DIV37:%.*]] = sdiv i32 [[SUB36]], 1
18388 // CHECK11-NEXT:    [[SUB38:%.*]] = sub nsw i32 [[DIV37]], 1
18389 // CHECK11-NEXT:    store i32 [[SUB38]], i32* [[DOTCAPTURE_EXPR_35]], align 4
18390 // CHECK11-NEXT:    [[TMP135:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_35]], align 4
18391 // CHECK11-NEXT:    [[ADD39:%.*]] = add nsw i32 [[TMP135]], 1
18392 // CHECK11-NEXT:    [[TMP136:%.*]] = zext i32 [[ADD39]] to i64
18393 // CHECK11-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP136]])
18394 // CHECK11-NEXT:    [[TMP137:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l477.region_id, i32 4, i8** [[TMP131]], i8** [[TMP132]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
18395 // CHECK11-NEXT:    [[TMP138:%.*]] = icmp ne i32 [[TMP137]], 0
18396 // CHECK11-NEXT:    br i1 [[TMP138]], label [[OMP_OFFLOAD_FAILED40:%.*]], label [[OMP_OFFLOAD_CONT41:%.*]]
18397 // CHECK11:       omp_offload.failed40:
18398 // CHECK11-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l477(i32 [[TMP107]], double* [[TMP108]], double* [[TMP109]], double* [[TMP110]]) #[[ATTR2]]
18399 // CHECK11-NEXT:    br label [[OMP_OFFLOAD_CONT41]]
18400 // CHECK11:       omp_offload.cont41:
18401 // CHECK11-NEXT:    [[TMP139:%.*]] = load i32, i32* [[CH]], align 4
18402 // CHECK11-NEXT:    store i32 [[TMP139]], i32* [[CH_CASTED42]], align 4
18403 // CHECK11-NEXT:    [[TMP140:%.*]] = load i32, i32* [[CH_CASTED42]], align 4
18404 // CHECK11-NEXT:    [[TMP141:%.*]] = load i32, i32* [[N]], align 4
18405 // CHECK11-NEXT:    store i32 [[TMP141]], i32* [[N_CASTED43]], align 4
18406 // CHECK11-NEXT:    [[TMP142:%.*]] = load i32, i32* [[N_CASTED43]], align 4
18407 // CHECK11-NEXT:    [[TMP143:%.*]] = load double*, double** [[A]], align 4
18408 // CHECK11-NEXT:    [[TMP144:%.*]] = load double*, double** [[B]], align 4
18409 // CHECK11-NEXT:    [[TMP145:%.*]] = load double*, double** [[C]], align 4
18410 // CHECK11-NEXT:    [[TMP146:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 0
18411 // CHECK11-NEXT:    [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i32*
18412 // CHECK11-NEXT:    store i32 [[TMP140]], i32* [[TMP147]], align 4
18413 // CHECK11-NEXT:    [[TMP148:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 0
18414 // CHECK11-NEXT:    [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i32*
18415 // CHECK11-NEXT:    store i32 [[TMP140]], i32* [[TMP149]], align 4
18416 // CHECK11-NEXT:    [[TMP150:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 0
18417 // CHECK11-NEXT:    store i8* null, i8** [[TMP150]], align 4
18418 // CHECK11-NEXT:    [[TMP151:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 1
18419 // CHECK11-NEXT:    [[TMP152:%.*]] = bitcast i8** [[TMP151]] to i32*
18420 // CHECK11-NEXT:    store i32 [[TMP142]], i32* [[TMP152]], align 4
18421 // CHECK11-NEXT:    [[TMP153:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 1
18422 // CHECK11-NEXT:    [[TMP154:%.*]] = bitcast i8** [[TMP153]] to i32*
18423 // CHECK11-NEXT:    store i32 [[TMP142]], i32* [[TMP154]], align 4
18424 // CHECK11-NEXT:    [[TMP155:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 1
18425 // CHECK11-NEXT:    store i8* null, i8** [[TMP155]], align 4
18426 // CHECK11-NEXT:    [[TMP156:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 2
18427 // CHECK11-NEXT:    [[TMP157:%.*]] = bitcast i8** [[TMP156]] to double**
18428 // CHECK11-NEXT:    store double* [[TMP143]], double** [[TMP157]], align 4
18429 // CHECK11-NEXT:    [[TMP158:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 2
18430 // CHECK11-NEXT:    [[TMP159:%.*]] = bitcast i8** [[TMP158]] to double**
18431 // CHECK11-NEXT:    store double* [[TMP143]], double** [[TMP159]], align 4
18432 // CHECK11-NEXT:    [[TMP160:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 2
18433 // CHECK11-NEXT:    store i8* null, i8** [[TMP160]], align 4
18434 // CHECK11-NEXT:    [[TMP161:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 3
18435 // CHECK11-NEXT:    [[TMP162:%.*]] = bitcast i8** [[TMP161]] to double**
18436 // CHECK11-NEXT:    store double* [[TMP144]], double** [[TMP162]], align 4
18437 // CHECK11-NEXT:    [[TMP163:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 3
18438 // CHECK11-NEXT:    [[TMP164:%.*]] = bitcast i8** [[TMP163]] to double**
18439 // CHECK11-NEXT:    store double* [[TMP144]], double** [[TMP164]], align 4
18440 // CHECK11-NEXT:    [[TMP165:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 3
18441 // CHECK11-NEXT:    store i8* null, i8** [[TMP165]], align 4
18442 // CHECK11-NEXT:    [[TMP166:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 4
18443 // CHECK11-NEXT:    [[TMP167:%.*]] = bitcast i8** [[TMP166]] to double**
18444 // CHECK11-NEXT:    store double* [[TMP145]], double** [[TMP167]], align 4
18445 // CHECK11-NEXT:    [[TMP168:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 4
18446 // CHECK11-NEXT:    [[TMP169:%.*]] = bitcast i8** [[TMP168]] to double**
18447 // CHECK11-NEXT:    store double* [[TMP145]], double** [[TMP169]], align 4
18448 // CHECK11-NEXT:    [[TMP170:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 4
18449 // CHECK11-NEXT:    store i8* null, i8** [[TMP170]], align 4
18450 // CHECK11-NEXT:    [[TMP171:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 0
18451 // CHECK11-NEXT:    [[TMP172:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 0
18452 // CHECK11-NEXT:    [[TMP173:%.*]] = load i32, i32* [[N]], align 4
18453 // CHECK11-NEXT:    store i32 [[TMP173]], i32* [[DOTCAPTURE_EXPR_48]], align 4
18454 // CHECK11-NEXT:    [[TMP174:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_48]], align 4
18455 // CHECK11-NEXT:    [[SUB50:%.*]] = sub nsw i32 [[TMP174]], 0
18456 // CHECK11-NEXT:    [[DIV51:%.*]] = sdiv i32 [[SUB50]], 1
18457 // CHECK11-NEXT:    [[SUB52:%.*]] = sub nsw i32 [[DIV51]], 1
18458 // CHECK11-NEXT:    store i32 [[SUB52]], i32* [[DOTCAPTURE_EXPR_49]], align 4
18459 // CHECK11-NEXT:    [[TMP175:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_49]], align 4
18460 // CHECK11-NEXT:    [[ADD53:%.*]] = add nsw i32 [[TMP175]], 1
18461 // CHECK11-NEXT:    [[TMP176:%.*]] = zext i32 [[ADD53]] to i64
18462 // CHECK11-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP176]])
18463 // CHECK11-NEXT:    [[TMP177:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l505.region_id, i32 5, i8** [[TMP171]], i8** [[TMP172]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
18464 // CHECK11-NEXT:    [[TMP178:%.*]] = icmp ne i32 [[TMP177]], 0
18465 // CHECK11-NEXT:    br i1 [[TMP178]], label [[OMP_OFFLOAD_FAILED54:%.*]], label [[OMP_OFFLOAD_CONT55:%.*]]
18466 // CHECK11:       omp_offload.failed54:
18467 // CHECK11-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l505(i32 [[TMP140]], i32 [[TMP142]], double* [[TMP143]], double* [[TMP144]], double* [[TMP145]]) #[[ATTR2]]
18468 // CHECK11-NEXT:    br label [[OMP_OFFLOAD_CONT55]]
18469 // CHECK11:       omp_offload.cont55:
18470 // CHECK11-NEXT:    [[TMP179:%.*]] = load i32, i32* [[N]], align 4
18471 // CHECK11-NEXT:    store i32 [[TMP179]], i32* [[N_CASTED56]], align 4
18472 // CHECK11-NEXT:    [[TMP180:%.*]] = load i32, i32* [[N_CASTED56]], align 4
18473 // CHECK11-NEXT:    [[TMP181:%.*]] = load double*, double** [[A]], align 4
18474 // CHECK11-NEXT:    [[TMP182:%.*]] = load double*, double** [[B]], align 4
18475 // CHECK11-NEXT:    [[TMP183:%.*]] = load double*, double** [[C]], align 4
18476 // CHECK11-NEXT:    [[TMP184:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 0
18477 // CHECK11-NEXT:    [[TMP185:%.*]] = bitcast i8** [[TMP184]] to i32*
18478 // CHECK11-NEXT:    store i32 [[TMP180]], i32* [[TMP185]], align 4
18479 // CHECK11-NEXT:    [[TMP186:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS58]], i32 0, i32 0
18480 // CHECK11-NEXT:    [[TMP187:%.*]] = bitcast i8** [[TMP186]] to i32*
18481 // CHECK11-NEXT:    store i32 [[TMP180]], i32* [[TMP187]], align 4
18482 // CHECK11-NEXT:    [[TMP188:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS59]], i32 0, i32 0
18483 // CHECK11-NEXT:    store i8* null, i8** [[TMP188]], align 4
18484 // CHECK11-NEXT:    [[TMP189:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 1
18485 // CHECK11-NEXT:    [[TMP190:%.*]] = bitcast i8** [[TMP189]] to double**
18486 // CHECK11-NEXT:    store double* [[TMP181]], double** [[TMP190]], align 4
18487 // CHECK11-NEXT:    [[TMP191:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS58]], i32 0, i32 1
18488 // CHECK11-NEXT:    [[TMP192:%.*]] = bitcast i8** [[TMP191]] to double**
18489 // CHECK11-NEXT:    store double* [[TMP181]], double** [[TMP192]], align 4
18490 // CHECK11-NEXT:    [[TMP193:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS59]], i32 0, i32 1
18491 // CHECK11-NEXT:    store i8* null, i8** [[TMP193]], align 4
18492 // CHECK11-NEXT:    [[TMP194:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 2
18493 // CHECK11-NEXT:    [[TMP195:%.*]] = bitcast i8** [[TMP194]] to double**
18494 // CHECK11-NEXT:    store double* [[TMP182]], double** [[TMP195]], align 4
18495 // CHECK11-NEXT:    [[TMP196:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS58]], i32 0, i32 2
18496 // CHECK11-NEXT:    [[TMP197:%.*]] = bitcast i8** [[TMP196]] to double**
18497 // CHECK11-NEXT:    store double* [[TMP182]], double** [[TMP197]], align 4
18498 // CHECK11-NEXT:    [[TMP198:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS59]], i32 0, i32 2
18499 // CHECK11-NEXT:    store i8* null, i8** [[TMP198]], align 4
18500 // CHECK11-NEXT:    [[TMP199:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 3
18501 // CHECK11-NEXT:    [[TMP200:%.*]] = bitcast i8** [[TMP199]] to double**
18502 // CHECK11-NEXT:    store double* [[TMP183]], double** [[TMP200]], align 4
18503 // CHECK11-NEXT:    [[TMP201:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS58]], i32 0, i32 3
18504 // CHECK11-NEXT:    [[TMP202:%.*]] = bitcast i8** [[TMP201]] to double**
18505 // CHECK11-NEXT:    store double* [[TMP183]], double** [[TMP202]], align 4
18506 // CHECK11-NEXT:    [[TMP203:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS59]], i32 0, i32 3
18507 // CHECK11-NEXT:    store i8* null, i8** [[TMP203]], align 4
18508 // CHECK11-NEXT:    [[TMP204:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 0
18509 // CHECK11-NEXT:    [[TMP205:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS58]], i32 0, i32 0
18510 // CHECK11-NEXT:    [[TMP206:%.*]] = load i32, i32* [[N]], align 4
18511 // CHECK11-NEXT:    store i32 [[TMP206]], i32* [[DOTCAPTURE_EXPR_61]], align 4
18512 // CHECK11-NEXT:    [[TMP207:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_61]], align 4
18513 // CHECK11-NEXT:    [[SUB63:%.*]] = sub nsw i32 [[TMP207]], 0
18514 // CHECK11-NEXT:    [[DIV64:%.*]] = sdiv i32 [[SUB63]], 1
18515 // CHECK11-NEXT:    [[SUB65:%.*]] = sub nsw i32 [[DIV64]], 1
18516 // CHECK11-NEXT:    store i32 [[SUB65]], i32* [[DOTCAPTURE_EXPR_62]], align 4
18517 // CHECK11-NEXT:    [[TMP208:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_62]], align 4
18518 // CHECK11-NEXT:    [[ADD66:%.*]] = add nsw i32 [[TMP208]], 1
18519 // CHECK11-NEXT:    [[TMP209:%.*]] = zext i32 [[ADD66]] to i64
18520 // CHECK11-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP209]])
18521 // CHECK11-NEXT:    [[TMP210:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l535.region_id, i32 4, i8** [[TMP204]], i8** [[TMP205]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
18522 // CHECK11-NEXT:    [[TMP211:%.*]] = icmp ne i32 [[TMP210]], 0
18523 // CHECK11-NEXT:    br i1 [[TMP211]], label [[OMP_OFFLOAD_FAILED67:%.*]], label [[OMP_OFFLOAD_CONT68:%.*]]
18524 // CHECK11:       omp_offload.failed67:
18525 // CHECK11-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l535(i32 [[TMP180]], double* [[TMP181]], double* [[TMP182]], double* [[TMP183]]) #[[ATTR2]]
18526 // CHECK11-NEXT:    br label [[OMP_OFFLOAD_CONT68]]
18527 // CHECK11:       omp_offload.cont68:
18528 // CHECK11-NEXT:    [[TMP212:%.*]] = load i32, i32* [[CH]], align 4
18529 // CHECK11-NEXT:    store i32 [[TMP212]], i32* [[CH_CASTED69]], align 4
18530 // CHECK11-NEXT:    [[TMP213:%.*]] = load i32, i32* [[CH_CASTED69]], align 4
18531 // CHECK11-NEXT:    [[TMP214:%.*]] = load i32, i32* [[N]], align 4
18532 // CHECK11-NEXT:    store i32 [[TMP214]], i32* [[N_CASTED70]], align 4
18533 // CHECK11-NEXT:    [[TMP215:%.*]] = load i32, i32* [[N_CASTED70]], align 4
18534 // CHECK11-NEXT:    [[TMP216:%.*]] = load double*, double** [[A]], align 4
18535 // CHECK11-NEXT:    [[TMP217:%.*]] = load double*, double** [[B]], align 4
18536 // CHECK11-NEXT:    [[TMP218:%.*]] = load double*, double** [[C]], align 4
18537 // CHECK11-NEXT:    [[TMP219:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 0
18538 // CHECK11-NEXT:    [[TMP220:%.*]] = bitcast i8** [[TMP219]] to i32*
18539 // CHECK11-NEXT:    store i32 [[TMP213]], i32* [[TMP220]], align 4
18540 // CHECK11-NEXT:    [[TMP221:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 0
18541 // CHECK11-NEXT:    [[TMP222:%.*]] = bitcast i8** [[TMP221]] to i32*
18542 // CHECK11-NEXT:    store i32 [[TMP213]], i32* [[TMP222]], align 4
18543 // CHECK11-NEXT:    [[TMP223:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS73]], i32 0, i32 0
18544 // CHECK11-NEXT:    store i8* null, i8** [[TMP223]], align 4
18545 // CHECK11-NEXT:    [[TMP224:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 1
18546 // CHECK11-NEXT:    [[TMP225:%.*]] = bitcast i8** [[TMP224]] to i32*
18547 // CHECK11-NEXT:    store i32 [[TMP215]], i32* [[TMP225]], align 4
18548 // CHECK11-NEXT:    [[TMP226:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 1
18549 // CHECK11-NEXT:    [[TMP227:%.*]] = bitcast i8** [[TMP226]] to i32*
18550 // CHECK11-NEXT:    store i32 [[TMP215]], i32* [[TMP227]], align 4
18551 // CHECK11-NEXT:    [[TMP228:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS73]], i32 0, i32 1
18552 // CHECK11-NEXT:    store i8* null, i8** [[TMP228]], align 4
18553 // CHECK11-NEXT:    [[TMP229:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 2
18554 // CHECK11-NEXT:    [[TMP230:%.*]] = bitcast i8** [[TMP229]] to double**
18555 // CHECK11-NEXT:    store double* [[TMP216]], double** [[TMP230]], align 4
18556 // CHECK11-NEXT:    [[TMP231:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 2
18557 // CHECK11-NEXT:    [[TMP232:%.*]] = bitcast i8** [[TMP231]] to double**
18558 // CHECK11-NEXT:    store double* [[TMP216]], double** [[TMP232]], align 4
18559 // CHECK11-NEXT:    [[TMP233:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS73]], i32 0, i32 2
18560 // CHECK11-NEXT:    store i8* null, i8** [[TMP233]], align 4
18561 // CHECK11-NEXT:    [[TMP234:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 3
18562 // CHECK11-NEXT:    [[TMP235:%.*]] = bitcast i8** [[TMP234]] to double**
18563 // CHECK11-NEXT:    store double* [[TMP217]], double** [[TMP235]], align 4
18564 // CHECK11-NEXT:    [[TMP236:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 3
18565 // CHECK11-NEXT:    [[TMP237:%.*]] = bitcast i8** [[TMP236]] to double**
18566 // CHECK11-NEXT:    store double* [[TMP217]], double** [[TMP237]], align 4
18567 // CHECK11-NEXT:    [[TMP238:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS73]], i32 0, i32 3
18568 // CHECK11-NEXT:    store i8* null, i8** [[TMP238]], align 4
18569 // CHECK11-NEXT:    [[TMP239:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 4
18570 // CHECK11-NEXT:    [[TMP240:%.*]] = bitcast i8** [[TMP239]] to double**
18571 // CHECK11-NEXT:    store double* [[TMP218]], double** [[TMP240]], align 4
18572 // CHECK11-NEXT:    [[TMP241:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 4
18573 // CHECK11-NEXT:    [[TMP242:%.*]] = bitcast i8** [[TMP241]] to double**
18574 // CHECK11-NEXT:    store double* [[TMP218]], double** [[TMP242]], align 4
18575 // CHECK11-NEXT:    [[TMP243:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS73]], i32 0, i32 4
18576 // CHECK11-NEXT:    store i8* null, i8** [[TMP243]], align 4
18577 // CHECK11-NEXT:    [[TMP244:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 0
18578 // CHECK11-NEXT:    [[TMP245:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 0
18579 // CHECK11-NEXT:    [[TMP246:%.*]] = load i32, i32* [[N]], align 4
18580 // CHECK11-NEXT:    store i32 [[TMP246]], i32* [[DOTCAPTURE_EXPR_75]], align 4
18581 // CHECK11-NEXT:    [[TMP247:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_75]], align 4
18582 // CHECK11-NEXT:    [[SUB77:%.*]] = sub nsw i32 [[TMP247]], 0
18583 // CHECK11-NEXT:    [[DIV78:%.*]] = sdiv i32 [[SUB77]], 1
18584 // CHECK11-NEXT:    [[SUB79:%.*]] = sub nsw i32 [[DIV78]], 1
18585 // CHECK11-NEXT:    store i32 [[SUB79]], i32* [[DOTCAPTURE_EXPR_76]], align 4
18586 // CHECK11-NEXT:    [[TMP248:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_76]], align 4
18587 // CHECK11-NEXT:    [[ADD80:%.*]] = add nsw i32 [[TMP248]], 1
18588 // CHECK11-NEXT:    [[TMP249:%.*]] = zext i32 [[ADD80]] to i64
18589 // CHECK11-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP249]])
18590 // CHECK11-NEXT:    [[TMP250:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l561.region_id, i32 5, i8** [[TMP244]], i8** [[TMP245]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.24, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.25, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
18591 // CHECK11-NEXT:    [[TMP251:%.*]] = icmp ne i32 [[TMP250]], 0
18592 // CHECK11-NEXT:    br i1 [[TMP251]], label [[OMP_OFFLOAD_FAILED81:%.*]], label [[OMP_OFFLOAD_CONT82:%.*]]
18593 // CHECK11:       omp_offload.failed81:
18594 // CHECK11-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l561(i32 [[TMP213]], i32 [[TMP215]], double* [[TMP216]], double* [[TMP217]], double* [[TMP218]]) #[[ATTR2]]
18595 // CHECK11-NEXT:    br label [[OMP_OFFLOAD_CONT82]]
18596 // CHECK11:       omp_offload.cont82:
18597 // CHECK11-NEXT:    [[CALL:%.*]] = call i32 @_Z5tmainIiET_v()
18598 // CHECK11-NEXT:    ret i32 [[CALL]]
18599 //
18600 //
18601 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l368
18602 // CHECK11-SAME: (i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1:[0-9]+]] {
18603 // CHECK11-NEXT:  entry:
18604 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
18605 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
18606 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
18607 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
18608 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
18609 // CHECK11-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
18610 // CHECK11-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
18611 // CHECK11-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
18612 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
18613 // CHECK11-NEXT:    ret void
18614 //
18615 //
18616 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined.
18617 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
18618 // CHECK11-NEXT:  entry:
18619 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
18620 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
18621 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
18622 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
18623 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
18624 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
18625 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
18626 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
18627 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
18628 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
18629 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
18630 // CHECK11-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
18631 // CHECK11-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
18632 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
18633 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
18634 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
18635 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
18636 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
18637 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
18638 // CHECK11-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
18639 // CHECK11-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
18640 // CHECK11-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
18641 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
18642 // CHECK11-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
18643 // CHECK11-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
18644 // CHECK11-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
18645 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
18646 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
18647 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
18648 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
18649 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
18650 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
18651 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
18652 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
18653 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
18654 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
18655 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
18656 // CHECK11:       omp.precond.then:
18657 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
18658 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
18659 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
18660 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
18661 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
18662 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
18663 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
18664 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
18665 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
18666 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
18667 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
18668 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
18669 // CHECK11:       cond.true:
18670 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
18671 // CHECK11-NEXT:    br label [[COND_END:%.*]]
18672 // CHECK11:       cond.false:
18673 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
18674 // CHECK11-NEXT:    br label [[COND_END]]
18675 // CHECK11:       cond.end:
18676 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
18677 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
18678 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
18679 // CHECK11-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
18680 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
18681 // CHECK11:       omp.inner.for.cond:
18682 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
18683 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !18
18684 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
18685 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
18686 // CHECK11:       omp.inner.for.body:
18687 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !18
18688 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !18
18689 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !18
18690 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
18691 // CHECK11:       omp.inner.for.inc:
18692 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
18693 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !18
18694 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
18695 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
18696 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
18697 // CHECK11:       omp.inner.for.end:
18698 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
18699 // CHECK11:       omp.loop.exit:
18700 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
18701 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
18702 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
18703 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
18704 // CHECK11-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
18705 // CHECK11-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
18706 // CHECK11:       .omp.final.then:
18707 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
18708 // CHECK11-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
18709 // CHECK11-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
18710 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
18711 // CHECK11-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
18712 // CHECK11-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
18713 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
18714 // CHECK11:       .omp.final.done:
18715 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
18716 // CHECK11:       omp.precond.end:
18717 // CHECK11-NEXT:    ret void
18718 //
18719 //
18720 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..1
18721 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
18722 // CHECK11-NEXT:  entry:
18723 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
18724 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
18725 // CHECK11-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
18726 // CHECK11-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
18727 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
18728 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
18729 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
18730 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
18731 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
18732 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
18733 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
18734 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
18735 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
18736 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
18737 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
18738 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
18739 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
18740 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
18741 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
18742 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
18743 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
18744 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
18745 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
18746 // CHECK11-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
18747 // CHECK11-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
18748 // CHECK11-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
18749 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
18750 // CHECK11-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
18751 // CHECK11-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
18752 // CHECK11-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
18753 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
18754 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
18755 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
18756 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
18757 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
18758 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
18759 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
18760 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
18761 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
18762 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
18763 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
18764 // CHECK11:       omp.precond.then:
18765 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
18766 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
18767 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
18768 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
18769 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
18770 // CHECK11-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
18771 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
18772 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
18773 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
18774 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
18775 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
18776 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
18777 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
18778 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
18779 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
18780 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
18781 // CHECK11:       cond.true:
18782 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
18783 // CHECK11-NEXT:    br label [[COND_END:%.*]]
18784 // CHECK11:       cond.false:
18785 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
18786 // CHECK11-NEXT:    br label [[COND_END]]
18787 // CHECK11:       cond.end:
18788 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
18789 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
18790 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
18791 // CHECK11-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
18792 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
18793 // CHECK11:       omp.inner.for.cond:
18794 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
18795 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !22
18796 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
18797 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
18798 // CHECK11:       omp.inner.for.body:
18799 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
18800 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
18801 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
18802 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !22
18803 // CHECK11-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !22
18804 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !22
18805 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i32 [[TMP21]]
18806 // CHECK11-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !22
18807 // CHECK11-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !22
18808 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !22
18809 // CHECK11-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
18810 // CHECK11-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !22
18811 // CHECK11-NEXT:    [[ADD7:%.*]] = fadd double [[TMP22]], [[TMP25]]
18812 // CHECK11-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !22
18813 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !22
18814 // CHECK11-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
18815 // CHECK11-NEXT:    store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !22
18816 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
18817 // CHECK11:       omp.body.continue:
18818 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
18819 // CHECK11:       omp.inner.for.inc:
18820 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
18821 // CHECK11-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP28]], 1
18822 // CHECK11-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
18823 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]]
18824 // CHECK11:       omp.inner.for.end:
18825 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
18826 // CHECK11:       omp.loop.exit:
18827 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
18828 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
18829 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
18830 // CHECK11-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
18831 // CHECK11-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
18832 // CHECK11-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
18833 // CHECK11:       .omp.final.then:
18834 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
18835 // CHECK11-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
18836 // CHECK11-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
18837 // CHECK11-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
18838 // CHECK11-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
18839 // CHECK11-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
18840 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
18841 // CHECK11:       .omp.final.done:
18842 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
18843 // CHECK11:       omp.precond.end:
18844 // CHECK11-NEXT:    ret void
18845 //
18846 //
18847 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l407
18848 // CHECK11-SAME: (i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1]] {
18849 // CHECK11-NEXT:  entry:
18850 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
18851 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
18852 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
18853 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
18854 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
18855 // CHECK11-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
18856 // CHECK11-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
18857 // CHECK11-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
18858 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
18859 // CHECK11-NEXT:    ret void
18860 //
18861 //
18862 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..2
18863 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
18864 // CHECK11-NEXT:  entry:
18865 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
18866 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
18867 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
18868 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
18869 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
18870 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
18871 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
18872 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
18873 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
18874 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
18875 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
18876 // CHECK11-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
18877 // CHECK11-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
18878 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
18879 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
18880 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
18881 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
18882 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
18883 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
18884 // CHECK11-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
18885 // CHECK11-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
18886 // CHECK11-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
18887 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
18888 // CHECK11-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
18889 // CHECK11-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
18890 // CHECK11-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
18891 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
18892 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
18893 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
18894 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
18895 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
18896 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
18897 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
18898 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
18899 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
18900 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
18901 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
18902 // CHECK11:       omp.precond.then:
18903 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
18904 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
18905 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
18906 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
18907 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
18908 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
18909 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
18910 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
18911 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
18912 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
18913 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
18914 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
18915 // CHECK11:       cond.true:
18916 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
18917 // CHECK11-NEXT:    br label [[COND_END:%.*]]
18918 // CHECK11:       cond.false:
18919 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
18920 // CHECK11-NEXT:    br label [[COND_END]]
18921 // CHECK11:       cond.end:
18922 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
18923 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
18924 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
18925 // CHECK11-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
18926 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
18927 // CHECK11:       omp.inner.for.cond:
18928 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
18929 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !27
18930 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
18931 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
18932 // CHECK11:       omp.inner.for.body:
18933 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !27
18934 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !27
18935 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !27
18936 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
18937 // CHECK11:       omp.inner.for.inc:
18938 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
18939 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !27
18940 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
18941 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
18942 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]]
18943 // CHECK11:       omp.inner.for.end:
18944 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
18945 // CHECK11:       omp.loop.exit:
18946 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
18947 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
18948 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
18949 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
18950 // CHECK11-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
18951 // CHECK11-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
18952 // CHECK11:       .omp.final.then:
18953 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
18954 // CHECK11-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
18955 // CHECK11-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
18956 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
18957 // CHECK11-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
18958 // CHECK11-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
18959 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
18960 // CHECK11:       .omp.final.done:
18961 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
18962 // CHECK11:       omp.precond.end:
18963 // CHECK11-NEXT:    ret void
18964 //
18965 //
18966 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..3
18967 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
18968 // CHECK11-NEXT:  entry:
18969 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
18970 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
18971 // CHECK11-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
18972 // CHECK11-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
18973 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
18974 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
18975 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
18976 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
18977 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
18978 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
18979 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
18980 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
18981 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
18982 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
18983 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
18984 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
18985 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
18986 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
18987 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
18988 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
18989 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
18990 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
18991 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
18992 // CHECK11-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
18993 // CHECK11-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
18994 // CHECK11-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
18995 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
18996 // CHECK11-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
18997 // CHECK11-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
18998 // CHECK11-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
18999 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
19000 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
19001 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19002 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
19003 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
19004 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
19005 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
19006 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
19007 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19008 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
19009 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
19010 // CHECK11:       omp.precond.then:
19011 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
19012 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19013 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
19014 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
19015 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
19016 // CHECK11-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
19017 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
19018 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
19019 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
19020 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
19021 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
19022 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
19023 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19024 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19025 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
19026 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
19027 // CHECK11:       cond.true:
19028 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19029 // CHECK11-NEXT:    br label [[COND_END:%.*]]
19030 // CHECK11:       cond.false:
19031 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19032 // CHECK11-NEXT:    br label [[COND_END]]
19033 // CHECK11:       cond.end:
19034 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
19035 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
19036 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
19037 // CHECK11-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
19038 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
19039 // CHECK11:       omp.inner.for.cond:
19040 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
19041 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !30
19042 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
19043 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
19044 // CHECK11:       omp.inner.for.body:
19045 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
19046 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
19047 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
19048 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !30
19049 // CHECK11-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !30
19050 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !30
19051 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i32 [[TMP21]]
19052 // CHECK11-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !30
19053 // CHECK11-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !30
19054 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !30
19055 // CHECK11-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
19056 // CHECK11-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !30
19057 // CHECK11-NEXT:    [[ADD7:%.*]] = fadd double [[TMP22]], [[TMP25]]
19058 // CHECK11-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !30
19059 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !30
19060 // CHECK11-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
19061 // CHECK11-NEXT:    store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !30
19062 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
19063 // CHECK11:       omp.body.continue:
19064 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
19065 // CHECK11:       omp.inner.for.inc:
19066 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
19067 // CHECK11-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP28]], 1
19068 // CHECK11-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
19069 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]]
19070 // CHECK11:       omp.inner.for.end:
19071 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
19072 // CHECK11:       omp.loop.exit:
19073 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
19074 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
19075 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
19076 // CHECK11-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
19077 // CHECK11-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
19078 // CHECK11-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
19079 // CHECK11:       .omp.final.then:
19080 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19081 // CHECK11-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
19082 // CHECK11-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
19083 // CHECK11-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
19084 // CHECK11-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
19085 // CHECK11-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
19086 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
19087 // CHECK11:       .omp.final.done:
19088 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
19089 // CHECK11:       omp.precond.end:
19090 // CHECK11-NEXT:    ret void
19091 //
19092 //
19093 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l446
19094 // CHECK11-SAME: (i32 [[CH:%.*]], i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1]] {
19095 // CHECK11-NEXT:  entry:
19096 // CHECK11-NEXT:    [[CH_ADDR:%.*]] = alloca i32, align 4
19097 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
19098 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
19099 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
19100 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
19101 // CHECK11-NEXT:    store i32 [[CH]], i32* [[CH_ADDR]], align 4
19102 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
19103 // CHECK11-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
19104 // CHECK11-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
19105 // CHECK11-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
19106 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
19107 // CHECK11-NEXT:    ret void
19108 //
19109 //
19110 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..6
19111 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
19112 // CHECK11-NEXT:  entry:
19113 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
19114 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
19115 // CHECK11-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 4
19116 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
19117 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
19118 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
19119 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
19120 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
19121 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
19122 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
19123 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
19124 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
19125 // CHECK11-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
19126 // CHECK11-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
19127 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
19128 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
19129 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
19130 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
19131 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
19132 // CHECK11-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 4
19133 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
19134 // CHECK11-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
19135 // CHECK11-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
19136 // CHECK11-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
19137 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
19138 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
19139 // CHECK11-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 4
19140 // CHECK11-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 4
19141 // CHECK11-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 4
19142 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP1]], align 4
19143 // CHECK11-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
19144 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19145 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP6]], 0
19146 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
19147 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
19148 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
19149 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
19150 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19151 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP7]]
19152 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
19153 // CHECK11:       omp.precond.then:
19154 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
19155 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19156 // CHECK11-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_COMB_UB]], align 4
19157 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
19158 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
19159 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP0]], align 4
19160 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
19161 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
19162 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP9]])
19163 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
19164 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19165 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
19166 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
19167 // CHECK11:       cond.true:
19168 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19169 // CHECK11-NEXT:    br label [[COND_END:%.*]]
19170 // CHECK11:       cond.false:
19171 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
19172 // CHECK11-NEXT:    br label [[COND_END]]
19173 // CHECK11:       cond.end:
19174 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
19175 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
19176 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
19177 // CHECK11-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
19178 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
19179 // CHECK11:       omp.inner.for.cond:
19180 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
19181 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !33
19182 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], 1
19183 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP17]], [[ADD]]
19184 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
19185 // CHECK11:       omp.inner.for.body:
19186 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !33
19187 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !33
19188 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]]), !llvm.access.group !33
19189 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
19190 // CHECK11:       omp.inner.for.inc:
19191 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
19192 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !33
19193 // CHECK11-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
19194 // CHECK11-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
19195 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !33
19196 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !33
19197 // CHECK11-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
19198 // CHECK11-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !33
19199 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !33
19200 // CHECK11-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !33
19201 // CHECK11-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
19202 // CHECK11-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !33
19203 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !33
19204 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !33
19205 // CHECK11-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[TMP27]], [[TMP28]]
19206 // CHECK11-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
19207 // CHECK11:       cond.true10:
19208 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !33
19209 // CHECK11-NEXT:    br label [[COND_END12:%.*]]
19210 // CHECK11:       cond.false11:
19211 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !33
19212 // CHECK11-NEXT:    br label [[COND_END12]]
19213 // CHECK11:       cond.end12:
19214 // CHECK11-NEXT:    [[COND13:%.*]] = phi i32 [ [[TMP29]], [[COND_TRUE10]] ], [ [[TMP30]], [[COND_FALSE11]] ]
19215 // CHECK11-NEXT:    store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !33
19216 // CHECK11-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !33
19217 // CHECK11-NEXT:    store i32 [[TMP31]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
19218 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]]
19219 // CHECK11:       omp.inner.for.end:
19220 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
19221 // CHECK11:       omp.loop.exit:
19222 // CHECK11-NEXT:    [[TMP32:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
19223 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[TMP32]], align 4
19224 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP33]])
19225 // CHECK11-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
19226 // CHECK11-NEXT:    [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
19227 // CHECK11-NEXT:    br i1 [[TMP35]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
19228 // CHECK11:       .omp.final.then:
19229 // CHECK11-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19230 // CHECK11-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP36]], 0
19231 // CHECK11-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
19232 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV15]], 1
19233 // CHECK11-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL]]
19234 // CHECK11-NEXT:    store i32 [[ADD16]], i32* [[I3]], align 4
19235 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
19236 // CHECK11:       .omp.final.done:
19237 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
19238 // CHECK11:       omp.precond.end:
19239 // CHECK11-NEXT:    ret void
19240 //
19241 //
19242 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..7
19243 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
19244 // CHECK11-NEXT:  entry:
19245 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
19246 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
19247 // CHECK11-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
19248 // CHECK11-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
19249 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
19250 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
19251 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
19252 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
19253 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
19254 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
19255 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
19256 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
19257 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
19258 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
19259 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
19260 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
19261 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
19262 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
19263 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
19264 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
19265 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
19266 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
19267 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
19268 // CHECK11-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
19269 // CHECK11-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
19270 // CHECK11-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
19271 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
19272 // CHECK11-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
19273 // CHECK11-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
19274 // CHECK11-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
19275 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
19276 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
19277 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19278 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
19279 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
19280 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
19281 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
19282 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
19283 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19284 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
19285 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
19286 // CHECK11:       omp.precond.then:
19287 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
19288 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19289 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
19290 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
19291 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
19292 // CHECK11-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
19293 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
19294 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
19295 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
19296 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
19297 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
19298 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
19299 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19300 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19301 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
19302 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
19303 // CHECK11:       cond.true:
19304 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19305 // CHECK11-NEXT:    br label [[COND_END:%.*]]
19306 // CHECK11:       cond.false:
19307 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19308 // CHECK11-NEXT:    br label [[COND_END]]
19309 // CHECK11:       cond.end:
19310 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
19311 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
19312 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
19313 // CHECK11-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
19314 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
19315 // CHECK11:       omp.inner.for.cond:
19316 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
19317 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !36
19318 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
19319 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
19320 // CHECK11:       omp.inner.for.body:
19321 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
19322 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
19323 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
19324 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !36
19325 // CHECK11-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !36
19326 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !36
19327 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i32 [[TMP21]]
19328 // CHECK11-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !36
19329 // CHECK11-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !36
19330 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !36
19331 // CHECK11-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
19332 // CHECK11-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !36
19333 // CHECK11-NEXT:    [[ADD7:%.*]] = fadd double [[TMP22]], [[TMP25]]
19334 // CHECK11-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !36
19335 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !36
19336 // CHECK11-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
19337 // CHECK11-NEXT:    store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !36
19338 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
19339 // CHECK11:       omp.body.continue:
19340 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
19341 // CHECK11:       omp.inner.for.inc:
19342 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
19343 // CHECK11-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP28]], 1
19344 // CHECK11-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
19345 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP37:![0-9]+]]
19346 // CHECK11:       omp.inner.for.end:
19347 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
19348 // CHECK11:       omp.loop.exit:
19349 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
19350 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
19351 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
19352 // CHECK11-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
19353 // CHECK11-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
19354 // CHECK11-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
19355 // CHECK11:       .omp.final.then:
19356 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19357 // CHECK11-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
19358 // CHECK11-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
19359 // CHECK11-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
19360 // CHECK11-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
19361 // CHECK11-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
19362 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
19363 // CHECK11:       .omp.final.done:
19364 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
19365 // CHECK11:       omp.precond.end:
19366 // CHECK11-NEXT:    ret void
19367 //
19368 //
19369 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l477
19370 // CHECK11-SAME: (i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1]] {
19371 // CHECK11-NEXT:  entry:
19372 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
19373 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
19374 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
19375 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
19376 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
19377 // CHECK11-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
19378 // CHECK11-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
19379 // CHECK11-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
19380 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
19381 // CHECK11-NEXT:    ret void
19382 //
19383 //
19384 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..10
19385 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
19386 // CHECK11-NEXT:  entry:
19387 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
19388 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
19389 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
19390 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
19391 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
19392 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
19393 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
19394 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
19395 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
19396 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
19397 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
19398 // CHECK11-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
19399 // CHECK11-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
19400 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
19401 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
19402 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
19403 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
19404 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
19405 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
19406 // CHECK11-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
19407 // CHECK11-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
19408 // CHECK11-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
19409 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
19410 // CHECK11-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
19411 // CHECK11-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
19412 // CHECK11-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
19413 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
19414 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
19415 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19416 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
19417 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
19418 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
19419 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
19420 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
19421 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19422 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
19423 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
19424 // CHECK11:       omp.precond.then:
19425 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
19426 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19427 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
19428 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
19429 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
19430 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
19431 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
19432 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
19433 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
19434 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19435 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
19436 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
19437 // CHECK11:       cond.true:
19438 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19439 // CHECK11-NEXT:    br label [[COND_END:%.*]]
19440 // CHECK11:       cond.false:
19441 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
19442 // CHECK11-NEXT:    br label [[COND_END]]
19443 // CHECK11:       cond.end:
19444 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
19445 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
19446 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
19447 // CHECK11-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
19448 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
19449 // CHECK11:       omp.inner.for.cond:
19450 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39
19451 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !39
19452 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
19453 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
19454 // CHECK11:       omp.inner.for.body:
19455 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !39
19456 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !39
19457 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !39
19458 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
19459 // CHECK11:       omp.inner.for.inc:
19460 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39
19461 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !39
19462 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
19463 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39
19464 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP40:![0-9]+]]
19465 // CHECK11:       omp.inner.for.end:
19466 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
19467 // CHECK11:       omp.loop.exit:
19468 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
19469 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
19470 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
19471 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
19472 // CHECK11-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
19473 // CHECK11-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
19474 // CHECK11:       .omp.final.then:
19475 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19476 // CHECK11-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
19477 // CHECK11-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
19478 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
19479 // CHECK11-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
19480 // CHECK11-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
19481 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
19482 // CHECK11:       .omp.final.done:
19483 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
19484 // CHECK11:       omp.precond.end:
19485 // CHECK11-NEXT:    ret void
19486 //
19487 //
19488 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..11
19489 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
19490 // CHECK11-NEXT:  entry:
19491 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
19492 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
19493 // CHECK11-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
19494 // CHECK11-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
19495 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
19496 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
19497 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
19498 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
19499 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
19500 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
19501 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
19502 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
19503 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
19504 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
19505 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
19506 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
19507 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
19508 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
19509 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
19510 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
19511 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
19512 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
19513 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
19514 // CHECK11-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
19515 // CHECK11-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
19516 // CHECK11-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
19517 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
19518 // CHECK11-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
19519 // CHECK11-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
19520 // CHECK11-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
19521 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
19522 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
19523 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19524 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
19525 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
19526 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
19527 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
19528 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
19529 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19530 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
19531 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
19532 // CHECK11:       omp.precond.then:
19533 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
19534 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19535 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
19536 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
19537 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
19538 // CHECK11-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
19539 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
19540 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
19541 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
19542 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
19543 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
19544 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
19545 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19546 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19547 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
19548 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
19549 // CHECK11:       cond.true:
19550 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19551 // CHECK11-NEXT:    br label [[COND_END:%.*]]
19552 // CHECK11:       cond.false:
19553 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19554 // CHECK11-NEXT:    br label [[COND_END]]
19555 // CHECK11:       cond.end:
19556 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
19557 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
19558 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
19559 // CHECK11-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
19560 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
19561 // CHECK11:       omp.inner.for.cond:
19562 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !42
19563 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !42
19564 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
19565 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
19566 // CHECK11:       omp.inner.for.body:
19567 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !42
19568 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
19569 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
19570 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !42
19571 // CHECK11-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !42
19572 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !42
19573 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i32 [[TMP21]]
19574 // CHECK11-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !42
19575 // CHECK11-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !42
19576 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !42
19577 // CHECK11-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
19578 // CHECK11-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !42
19579 // CHECK11-NEXT:    [[ADD7:%.*]] = fadd double [[TMP22]], [[TMP25]]
19580 // CHECK11-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !42
19581 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !42
19582 // CHECK11-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
19583 // CHECK11-NEXT:    store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !42
19584 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
19585 // CHECK11:       omp.body.continue:
19586 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
19587 // CHECK11:       omp.inner.for.inc:
19588 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !42
19589 // CHECK11-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP28]], 1
19590 // CHECK11-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !42
19591 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP43:![0-9]+]]
19592 // CHECK11:       omp.inner.for.end:
19593 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
19594 // CHECK11:       omp.loop.exit:
19595 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
19596 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
19597 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
19598 // CHECK11-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
19599 // CHECK11-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
19600 // CHECK11-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
19601 // CHECK11:       .omp.final.then:
19602 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19603 // CHECK11-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
19604 // CHECK11-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
19605 // CHECK11-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
19606 // CHECK11-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
19607 // CHECK11-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
19608 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
19609 // CHECK11:       .omp.final.done:
19610 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
19611 // CHECK11:       omp.precond.end:
19612 // CHECK11-NEXT:    ret void
19613 //
19614 //
19615 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l505
19616 // CHECK11-SAME: (i32 [[CH:%.*]], i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1]] {
19617 // CHECK11-NEXT:  entry:
19618 // CHECK11-NEXT:    [[CH_ADDR:%.*]] = alloca i32, align 4
19619 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
19620 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
19621 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
19622 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
19623 // CHECK11-NEXT:    store i32 [[CH]], i32* [[CH_ADDR]], align 4
19624 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
19625 // CHECK11-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
19626 // CHECK11-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
19627 // CHECK11-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
19628 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
19629 // CHECK11-NEXT:    ret void
19630 //
19631 //
19632 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..14
19633 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
19634 // CHECK11-NEXT:  entry:
19635 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
19636 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
19637 // CHECK11-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 4
19638 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
19639 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
19640 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
19641 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
19642 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
19643 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
19644 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
19645 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
19646 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
19647 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
19648 // CHECK11-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
19649 // CHECK11-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
19650 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
19651 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
19652 // CHECK11-NEXT:    [[I4:%.*]] = alloca i32, align 4
19653 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
19654 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
19655 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
19656 // CHECK11-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 4
19657 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
19658 // CHECK11-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
19659 // CHECK11-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
19660 // CHECK11-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
19661 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
19662 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
19663 // CHECK11-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 4
19664 // CHECK11-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 4
19665 // CHECK11-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 4
19666 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
19667 // CHECK11-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
19668 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
19669 // CHECK11-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
19670 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19671 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
19672 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
19673 // CHECK11-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
19674 // CHECK11-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
19675 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
19676 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19677 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
19678 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
19679 // CHECK11:       omp.precond.then:
19680 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
19681 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
19682 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
19683 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
19684 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
19685 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
19686 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
19687 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
19688 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
19689 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
19690 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
19691 // CHECK11-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
19692 // CHECK11:       cond.true:
19693 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
19694 // CHECK11-NEXT:    br label [[COND_END:%.*]]
19695 // CHECK11:       cond.false:
19696 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
19697 // CHECK11-NEXT:    br label [[COND_END]]
19698 // CHECK11:       cond.end:
19699 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
19700 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
19701 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
19702 // CHECK11-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
19703 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
19704 // CHECK11:       omp.inner.for.cond:
19705 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !45
19706 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !45
19707 // CHECK11-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
19708 // CHECK11-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
19709 // CHECK11:       omp.inner.for.body:
19710 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !45
19711 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !45
19712 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !45
19713 // CHECK11-NEXT:    store i32 [[TMP21]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !45
19714 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !45
19715 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**, i32)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]], i32 [[TMP22]]), !llvm.access.group !45
19716 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
19717 // CHECK11:       omp.inner.for.inc:
19718 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !45
19719 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !45
19720 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
19721 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !45
19722 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP46:![0-9]+]]
19723 // CHECK11:       omp.inner.for.end:
19724 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
19725 // CHECK11:       omp.loop.exit:
19726 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
19727 // CHECK11-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
19728 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
19729 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
19730 // CHECK11-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
19731 // CHECK11-NEXT:    br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
19732 // CHECK11:       .omp.final.then:
19733 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19734 // CHECK11-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP29]], 0
19735 // CHECK11-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
19736 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
19737 // CHECK11-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
19738 // CHECK11-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
19739 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
19740 // CHECK11:       .omp.final.done:
19741 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
19742 // CHECK11:       omp.precond.end:
19743 // CHECK11-NEXT:    ret void
19744 //
19745 //
19746 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..15
19747 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
19748 // CHECK11-NEXT:  entry:
19749 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
19750 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
19751 // CHECK11-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
19752 // CHECK11-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
19753 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
19754 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
19755 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
19756 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
19757 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
19758 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
19759 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
19760 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
19761 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
19762 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
19763 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
19764 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
19765 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
19766 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
19767 // CHECK11-NEXT:    [[I4:%.*]] = alloca i32, align 4
19768 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
19769 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
19770 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
19771 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
19772 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
19773 // CHECK11-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
19774 // CHECK11-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
19775 // CHECK11-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
19776 // CHECK11-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
19777 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
19778 // CHECK11-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
19779 // CHECK11-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
19780 // CHECK11-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
19781 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
19782 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
19783 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19784 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
19785 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
19786 // CHECK11-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
19787 // CHECK11-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
19788 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
19789 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19790 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
19791 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
19792 // CHECK11:       omp.precond.then:
19793 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
19794 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
19795 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
19796 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
19797 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
19798 // CHECK11-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
19799 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
19800 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
19801 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
19802 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
19803 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
19804 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
19805 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP12]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP10]])
19806 // CHECK11-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
19807 // CHECK11:       omp.dispatch.cond:
19808 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19809 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
19810 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp ugt i32 [[TMP13]], [[TMP14]]
19811 // CHECK11-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
19812 // CHECK11:       cond.true:
19813 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
19814 // CHECK11-NEXT:    br label [[COND_END:%.*]]
19815 // CHECK11:       cond.false:
19816 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19817 // CHECK11-NEXT:    br label [[COND_END]]
19818 // CHECK11:       cond.end:
19819 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP15]], [[COND_TRUE]] ], [ [[TMP16]], [[COND_FALSE]] ]
19820 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
19821 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
19822 // CHECK11-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
19823 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
19824 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19825 // CHECK11-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
19826 // CHECK11-NEXT:    br i1 [[CMP6]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
19827 // CHECK11:       omp.dispatch.body:
19828 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
19829 // CHECK11:       omp.inner.for.cond:
19830 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !48
19831 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !48
19832 // CHECK11-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
19833 // CHECK11-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
19834 // CHECK11:       omp.inner.for.body:
19835 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !48
19836 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP22]], 1
19837 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
19838 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !48
19839 // CHECK11-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !48
19840 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !48
19841 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
19842 // CHECK11-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !48
19843 // CHECK11-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !48
19844 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !48
19845 // CHECK11-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
19846 // CHECK11-NEXT:    [[TMP28:%.*]] = load double, double* [[ARRAYIDX8]], align 4, !llvm.access.group !48
19847 // CHECK11-NEXT:    [[ADD9:%.*]] = fadd double [[TMP25]], [[TMP28]]
19848 // CHECK11-NEXT:    [[TMP29:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !48
19849 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !48
19850 // CHECK11-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds double, double* [[TMP29]], i32 [[TMP30]]
19851 // CHECK11-NEXT:    store double [[ADD9]], double* [[ARRAYIDX10]], align 4, !llvm.access.group !48
19852 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
19853 // CHECK11:       omp.body.continue:
19854 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
19855 // CHECK11:       omp.inner.for.inc:
19856 // CHECK11-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !48
19857 // CHECK11-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP31]], 1
19858 // CHECK11-NEXT:    store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !48
19859 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP49:![0-9]+]]
19860 // CHECK11:       omp.inner.for.end:
19861 // CHECK11-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
19862 // CHECK11:       omp.dispatch.inc:
19863 // CHECK11-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
19864 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
19865 // CHECK11-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP32]], [[TMP33]]
19866 // CHECK11-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_LB]], align 4
19867 // CHECK11-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19868 // CHECK11-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
19869 // CHECK11-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP34]], [[TMP35]]
19870 // CHECK11-NEXT:    store i32 [[ADD13]], i32* [[DOTOMP_UB]], align 4
19871 // CHECK11-NEXT:    br label [[OMP_DISPATCH_COND]]
19872 // CHECK11:       omp.dispatch.end:
19873 // CHECK11-NEXT:    [[TMP36:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
19874 // CHECK11-NEXT:    [[TMP37:%.*]] = load i32, i32* [[TMP36]], align 4
19875 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP37]])
19876 // CHECK11-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
19877 // CHECK11-NEXT:    [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
19878 // CHECK11-NEXT:    br i1 [[TMP39]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
19879 // CHECK11:       .omp.final.then:
19880 // CHECK11-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19881 // CHECK11-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP40]], 0
19882 // CHECK11-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
19883 // CHECK11-NEXT:    [[MUL16:%.*]] = mul nsw i32 [[DIV15]], 1
19884 // CHECK11-NEXT:    [[ADD17:%.*]] = add nsw i32 0, [[MUL16]]
19885 // CHECK11-NEXT:    store i32 [[ADD17]], i32* [[I4]], align 4
19886 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
19887 // CHECK11:       .omp.final.done:
19888 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
19889 // CHECK11:       omp.precond.end:
19890 // CHECK11-NEXT:    ret void
19891 //
19892 //
19893 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l535
19894 // CHECK11-SAME: (i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1]] {
19895 // CHECK11-NEXT:  entry:
19896 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
19897 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
19898 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
19899 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
19900 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
19901 // CHECK11-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
19902 // CHECK11-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
19903 // CHECK11-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
19904 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
19905 // CHECK11-NEXT:    ret void
19906 //
19907 //
19908 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..18
19909 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
19910 // CHECK11-NEXT:  entry:
19911 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
19912 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
19913 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
19914 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
19915 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
19916 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
19917 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
19918 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
19919 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
19920 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
19921 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
19922 // CHECK11-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
19923 // CHECK11-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
19924 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
19925 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
19926 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
19927 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
19928 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
19929 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
19930 // CHECK11-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
19931 // CHECK11-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
19932 // CHECK11-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
19933 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
19934 // CHECK11-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
19935 // CHECK11-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
19936 // CHECK11-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
19937 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
19938 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
19939 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19940 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
19941 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
19942 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
19943 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
19944 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
19945 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19946 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
19947 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
19948 // CHECK11:       omp.precond.then:
19949 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
19950 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19951 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
19952 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
19953 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
19954 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
19955 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
19956 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
19957 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
19958 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19959 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
19960 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
19961 // CHECK11:       cond.true:
19962 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
19963 // CHECK11-NEXT:    br label [[COND_END:%.*]]
19964 // CHECK11:       cond.false:
19965 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
19966 // CHECK11-NEXT:    br label [[COND_END]]
19967 // CHECK11:       cond.end:
19968 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
19969 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
19970 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
19971 // CHECK11-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
19972 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
19973 // CHECK11:       omp.inner.for.cond:
19974 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !51
19975 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !51
19976 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
19977 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
19978 // CHECK11:       omp.inner.for.body:
19979 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !51
19980 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !51
19981 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !51
19982 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
19983 // CHECK11:       omp.inner.for.inc:
19984 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !51
19985 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !51
19986 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
19987 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !51
19988 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP52:![0-9]+]]
19989 // CHECK11:       omp.inner.for.end:
19990 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
19991 // CHECK11:       omp.loop.exit:
19992 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
19993 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
19994 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
19995 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
19996 // CHECK11-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
19997 // CHECK11-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
19998 // CHECK11:       .omp.final.then:
19999 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
20000 // CHECK11-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
20001 // CHECK11-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
20002 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
20003 // CHECK11-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
20004 // CHECK11-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
20005 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
20006 // CHECK11:       .omp.final.done:
20007 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
20008 // CHECK11:       omp.precond.end:
20009 // CHECK11-NEXT:    ret void
20010 //
20011 //
20012 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..19
20013 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
20014 // CHECK11-NEXT:  entry:
20015 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
20016 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
20017 // CHECK11-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
20018 // CHECK11-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
20019 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
20020 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
20021 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
20022 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
20023 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
20024 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
20025 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
20026 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
20027 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
20028 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
20029 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
20030 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
20031 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
20032 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
20033 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
20034 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
20035 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
20036 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
20037 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
20038 // CHECK11-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
20039 // CHECK11-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
20040 // CHECK11-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
20041 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
20042 // CHECK11-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
20043 // CHECK11-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
20044 // CHECK11-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
20045 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
20046 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
20047 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
20048 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
20049 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
20050 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
20051 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
20052 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
20053 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
20054 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
20055 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
20056 // CHECK11:       omp.precond.then:
20057 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
20058 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
20059 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
20060 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
20061 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
20062 // CHECK11-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
20063 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
20064 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
20065 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
20066 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
20067 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
20068 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
20069 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
20070 // CHECK11-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32 35, i32 [[TMP10]], i32 [[TMP11]], i32 1, i32 1)
20071 // CHECK11-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
20072 // CHECK11:       omp.dispatch.cond:
20073 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
20074 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4
20075 // CHECK11-NEXT:    [[TMP16:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP15]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
20076 // CHECK11-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP16]], 0
20077 // CHECK11-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
20078 // CHECK11:       omp.dispatch.body:
20079 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
20080 // CHECK11-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
20081 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
20082 // CHECK11:       omp.inner.for.cond:
20083 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !54
20084 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !54
20085 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
20086 // CHECK11-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
20087 // CHECK11:       omp.inner.for.body:
20088 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !54
20089 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP20]], 1
20090 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
20091 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !54
20092 // CHECK11-NEXT:    [[TMP21:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !54
20093 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !54
20094 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP21]], i32 [[TMP22]]
20095 // CHECK11-NEXT:    [[TMP23:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !54
20096 // CHECK11-NEXT:    [[TMP24:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !54
20097 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !54
20098 // CHECK11-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds double, double* [[TMP24]], i32 [[TMP25]]
20099 // CHECK11-NEXT:    [[TMP26:%.*]] = load double, double* [[ARRAYIDX5]], align 4, !llvm.access.group !54
20100 // CHECK11-NEXT:    [[ADD6:%.*]] = fadd double [[TMP23]], [[TMP26]]
20101 // CHECK11-NEXT:    [[TMP27:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !54
20102 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !54
20103 // CHECK11-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds double, double* [[TMP27]], i32 [[TMP28]]
20104 // CHECK11-NEXT:    store double [[ADD6]], double* [[ARRAYIDX7]], align 4, !llvm.access.group !54
20105 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
20106 // CHECK11:       omp.body.continue:
20107 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
20108 // CHECK11:       omp.inner.for.inc:
20109 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !54
20110 // CHECK11-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP29]], 1
20111 // CHECK11-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !54
20112 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP55:![0-9]+]]
20113 // CHECK11:       omp.inner.for.end:
20114 // CHECK11-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
20115 // CHECK11:       omp.dispatch.inc:
20116 // CHECK11-NEXT:    br label [[OMP_DISPATCH_COND]]
20117 // CHECK11:       omp.dispatch.end:
20118 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
20119 // CHECK11-NEXT:    [[TMP31:%.*]] = icmp ne i32 [[TMP30]], 0
20120 // CHECK11-NEXT:    br i1 [[TMP31]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
20121 // CHECK11:       .omp.final.then:
20122 // CHECK11-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
20123 // CHECK11-NEXT:    [[SUB9:%.*]] = sub nsw i32 [[TMP32]], 0
20124 // CHECK11-NEXT:    [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
20125 // CHECK11-NEXT:    [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
20126 // CHECK11-NEXT:    [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
20127 // CHECK11-NEXT:    store i32 [[ADD12]], i32* [[I3]], align 4
20128 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
20129 // CHECK11:       .omp.final.done:
20130 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
20131 // CHECK11:       omp.precond.end:
20132 // CHECK11-NEXT:    ret void
20133 //
20134 //
20135 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l561
20136 // CHECK11-SAME: (i32 [[CH:%.*]], i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1]] {
20137 // CHECK11-NEXT:  entry:
20138 // CHECK11-NEXT:    [[CH_ADDR:%.*]] = alloca i32, align 4
20139 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
20140 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
20141 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
20142 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
20143 // CHECK11-NEXT:    store i32 [[CH]], i32* [[CH_ADDR]], align 4
20144 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
20145 // CHECK11-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
20146 // CHECK11-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
20147 // CHECK11-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
20148 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
20149 // CHECK11-NEXT:    ret void
20150 //
20151 //
20152 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..22
20153 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
20154 // CHECK11-NEXT:  entry:
20155 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
20156 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
20157 // CHECK11-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 4
20158 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
20159 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
20160 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
20161 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
20162 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
20163 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
20164 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
20165 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
20166 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
20167 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
20168 // CHECK11-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
20169 // CHECK11-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
20170 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
20171 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
20172 // CHECK11-NEXT:    [[I4:%.*]] = alloca i32, align 4
20173 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
20174 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
20175 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
20176 // CHECK11-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 4
20177 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
20178 // CHECK11-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
20179 // CHECK11-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
20180 // CHECK11-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
20181 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
20182 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
20183 // CHECK11-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 4
20184 // CHECK11-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 4
20185 // CHECK11-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 4
20186 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
20187 // CHECK11-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
20188 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
20189 // CHECK11-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
20190 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
20191 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
20192 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
20193 // CHECK11-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
20194 // CHECK11-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
20195 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
20196 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
20197 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
20198 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
20199 // CHECK11:       omp.precond.then:
20200 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
20201 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
20202 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
20203 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
20204 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
20205 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
20206 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
20207 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
20208 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
20209 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
20210 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
20211 // CHECK11-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
20212 // CHECK11:       cond.true:
20213 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
20214 // CHECK11-NEXT:    br label [[COND_END:%.*]]
20215 // CHECK11:       cond.false:
20216 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
20217 // CHECK11-NEXT:    br label [[COND_END]]
20218 // CHECK11:       cond.end:
20219 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
20220 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
20221 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
20222 // CHECK11-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
20223 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
20224 // CHECK11:       omp.inner.for.cond:
20225 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !57
20226 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !57
20227 // CHECK11-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
20228 // CHECK11-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
20229 // CHECK11:       omp.inner.for.body:
20230 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !57
20231 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !57
20232 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !57
20233 // CHECK11-NEXT:    store i32 [[TMP21]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !57
20234 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !57
20235 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**, i32)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]], i32 [[TMP22]]), !llvm.access.group !57
20236 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
20237 // CHECK11:       omp.inner.for.inc:
20238 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !57
20239 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !57
20240 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
20241 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !57
20242 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP58:![0-9]+]]
20243 // CHECK11:       omp.inner.for.end:
20244 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
20245 // CHECK11:       omp.loop.exit:
20246 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
20247 // CHECK11-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
20248 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
20249 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
20250 // CHECK11-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
20251 // CHECK11-NEXT:    br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
20252 // CHECK11:       .omp.final.then:
20253 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
20254 // CHECK11-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP29]], 0
20255 // CHECK11-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
20256 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
20257 // CHECK11-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
20258 // CHECK11-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
20259 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
20260 // CHECK11:       .omp.final.done:
20261 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
20262 // CHECK11:       omp.precond.end:
20263 // CHECK11-NEXT:    ret void
20264 //
20265 //
20266 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..23
20267 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
20268 // CHECK11-NEXT:  entry:
20269 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
20270 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
20271 // CHECK11-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
20272 // CHECK11-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
20273 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
20274 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
20275 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
20276 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
20277 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
20278 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
20279 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
20280 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
20281 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
20282 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
20283 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
20284 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
20285 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
20286 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
20287 // CHECK11-NEXT:    [[I4:%.*]] = alloca i32, align 4
20288 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
20289 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
20290 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
20291 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
20292 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
20293 // CHECK11-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
20294 // CHECK11-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
20295 // CHECK11-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
20296 // CHECK11-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
20297 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
20298 // CHECK11-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
20299 // CHECK11-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
20300 // CHECK11-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
20301 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
20302 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
20303 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
20304 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
20305 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
20306 // CHECK11-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
20307 // CHECK11-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
20308 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
20309 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
20310 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
20311 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
20312 // CHECK11:       omp.precond.then:
20313 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
20314 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
20315 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
20316 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
20317 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
20318 // CHECK11-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
20319 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
20320 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
20321 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
20322 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
20323 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
20324 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
20325 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
20326 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
20327 // CHECK11-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32 35, i32 [[TMP11]], i32 [[TMP12]], i32 1, i32 [[TMP10]])
20328 // CHECK11-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
20329 // CHECK11:       omp.dispatch.cond:
20330 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
20331 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4
20332 // CHECK11-NEXT:    [[TMP17:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP16]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
20333 // CHECK11-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP17]], 0
20334 // CHECK11-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
20335 // CHECK11:       omp.dispatch.body:
20336 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
20337 // CHECK11-NEXT:    store i32 [[TMP18]], i32* [[DOTOMP_IV]], align 4
20338 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
20339 // CHECK11:       omp.inner.for.cond:
20340 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !60
20341 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !60
20342 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
20343 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
20344 // CHECK11:       omp.inner.for.body:
20345 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !60
20346 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP21]], 1
20347 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
20348 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !60
20349 // CHECK11-NEXT:    [[TMP22:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !60
20350 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !60
20351 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP22]], i32 [[TMP23]]
20352 // CHECK11-NEXT:    [[TMP24:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !60
20353 // CHECK11-NEXT:    [[TMP25:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !60
20354 // CHECK11-NEXT:    [[TMP26:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !60
20355 // CHECK11-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP25]], i32 [[TMP26]]
20356 // CHECK11-NEXT:    [[TMP27:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !60
20357 // CHECK11-NEXT:    [[ADD7:%.*]] = fadd double [[TMP24]], [[TMP27]]
20358 // CHECK11-NEXT:    [[TMP28:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !60
20359 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !60
20360 // CHECK11-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP28]], i32 [[TMP29]]
20361 // CHECK11-NEXT:    store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !60
20362 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
20363 // CHECK11:       omp.body.continue:
20364 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
20365 // CHECK11:       omp.inner.for.inc:
20366 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !60
20367 // CHECK11-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP30]], 1
20368 // CHECK11-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !60
20369 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP61:![0-9]+]]
20370 // CHECK11:       omp.inner.for.end:
20371 // CHECK11-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
20372 // CHECK11:       omp.dispatch.inc:
20373 // CHECK11-NEXT:    br label [[OMP_DISPATCH_COND]]
20374 // CHECK11:       omp.dispatch.end:
20375 // CHECK11-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
20376 // CHECK11-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
20377 // CHECK11-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
20378 // CHECK11:       .omp.final.then:
20379 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
20380 // CHECK11-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
20381 // CHECK11-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
20382 // CHECK11-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
20383 // CHECK11-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
20384 // CHECK11-NEXT:    store i32 [[ADD13]], i32* [[I4]], align 4
20385 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
20386 // CHECK11:       .omp.final.done:
20387 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
20388 // CHECK11:       omp.precond.end:
20389 // CHECK11-NEXT:    ret void
20390 //
20391 //
20392 // CHECK11-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
20393 // CHECK11-SAME: () #[[ATTR3:[0-9]+]] comdat {
20394 // CHECK11-NEXT:  entry:
20395 // CHECK11-NEXT:    [[A:%.*]] = alloca i32*, align 4
20396 // CHECK11-NEXT:    [[B:%.*]] = alloca i32*, align 4
20397 // CHECK11-NEXT:    [[C:%.*]] = alloca i32*, align 4
20398 // CHECK11-NEXT:    [[N:%.*]] = alloca i32, align 4
20399 // CHECK11-NEXT:    [[CH:%.*]] = alloca i32, align 4
20400 // CHECK11-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
20401 // CHECK11-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 4
20402 // CHECK11-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 4
20403 // CHECK11-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 4
20404 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
20405 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
20406 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
20407 // CHECK11-NEXT:    [[N_CASTED3:%.*]] = alloca i32, align 4
20408 // CHECK11-NEXT:    [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [4 x i8*], align 4
20409 // CHECK11-NEXT:    [[DOTOFFLOAD_PTRS5:%.*]] = alloca [4 x i8*], align 4
20410 // CHECK11-NEXT:    [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [4 x i8*], align 4
20411 // CHECK11-NEXT:    [[_TMP7:%.*]] = alloca i32, align 4
20412 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_8:%.*]] = alloca i32, align 4
20413 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4
20414 // CHECK11-NEXT:    [[CH_CASTED:%.*]] = alloca i32, align 4
20415 // CHECK11-NEXT:    [[N_CASTED16:%.*]] = alloca i32, align 4
20416 // CHECK11-NEXT:    [[DOTOFFLOAD_BASEPTRS17:%.*]] = alloca [5 x i8*], align 4
20417 // CHECK11-NEXT:    [[DOTOFFLOAD_PTRS18:%.*]] = alloca [5 x i8*], align 4
20418 // CHECK11-NEXT:    [[DOTOFFLOAD_MAPPERS19:%.*]] = alloca [5 x i8*], align 4
20419 // CHECK11-NEXT:    [[_TMP20:%.*]] = alloca i32, align 4
20420 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_21:%.*]] = alloca i32, align 4
20421 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_22:%.*]] = alloca i32, align 4
20422 // CHECK11-NEXT:    [[N_CASTED29:%.*]] = alloca i32, align 4
20423 // CHECK11-NEXT:    [[DOTOFFLOAD_BASEPTRS30:%.*]] = alloca [4 x i8*], align 4
20424 // CHECK11-NEXT:    [[DOTOFFLOAD_PTRS31:%.*]] = alloca [4 x i8*], align 4
20425 // CHECK11-NEXT:    [[DOTOFFLOAD_MAPPERS32:%.*]] = alloca [4 x i8*], align 4
20426 // CHECK11-NEXT:    [[_TMP33:%.*]] = alloca i32, align 4
20427 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_34:%.*]] = alloca i32, align 4
20428 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_35:%.*]] = alloca i32, align 4
20429 // CHECK11-NEXT:    [[CH_CASTED42:%.*]] = alloca i32, align 4
20430 // CHECK11-NEXT:    [[N_CASTED43:%.*]] = alloca i32, align 4
20431 // CHECK11-NEXT:    [[DOTOFFLOAD_BASEPTRS44:%.*]] = alloca [5 x i8*], align 4
20432 // CHECK11-NEXT:    [[DOTOFFLOAD_PTRS45:%.*]] = alloca [5 x i8*], align 4
20433 // CHECK11-NEXT:    [[DOTOFFLOAD_MAPPERS46:%.*]] = alloca [5 x i8*], align 4
20434 // CHECK11-NEXT:    [[_TMP47:%.*]] = alloca i32, align 4
20435 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_48:%.*]] = alloca i32, align 4
20436 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_49:%.*]] = alloca i32, align 4
20437 // CHECK11-NEXT:    [[N_CASTED56:%.*]] = alloca i32, align 4
20438 // CHECK11-NEXT:    [[DOTOFFLOAD_BASEPTRS57:%.*]] = alloca [4 x i8*], align 4
20439 // CHECK11-NEXT:    [[DOTOFFLOAD_PTRS58:%.*]] = alloca [4 x i8*], align 4
20440 // CHECK11-NEXT:    [[DOTOFFLOAD_MAPPERS59:%.*]] = alloca [4 x i8*], align 4
20441 // CHECK11-NEXT:    [[_TMP60:%.*]] = alloca i32, align 4
20442 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_61:%.*]] = alloca i32, align 4
20443 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_62:%.*]] = alloca i32, align 4
20444 // CHECK11-NEXT:    [[CH_CASTED69:%.*]] = alloca i32, align 4
20445 // CHECK11-NEXT:    [[N_CASTED70:%.*]] = alloca i32, align 4
20446 // CHECK11-NEXT:    [[DOTOFFLOAD_BASEPTRS71:%.*]] = alloca [5 x i8*], align 4
20447 // CHECK11-NEXT:    [[DOTOFFLOAD_PTRS72:%.*]] = alloca [5 x i8*], align 4
20448 // CHECK11-NEXT:    [[DOTOFFLOAD_MAPPERS73:%.*]] = alloca [5 x i8*], align 4
20449 // CHECK11-NEXT:    [[_TMP74:%.*]] = alloca i32, align 4
20450 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_75:%.*]] = alloca i32, align 4
20451 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_76:%.*]] = alloca i32, align 4
20452 // CHECK11-NEXT:    store i32 10000, i32* [[N]], align 4
20453 // CHECK11-NEXT:    store i32 100, i32* [[CH]], align 4
20454 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
20455 // CHECK11-NEXT:    store i32 [[TMP0]], i32* [[N_CASTED]], align 4
20456 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_CASTED]], align 4
20457 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[A]], align 4
20458 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[B]], align 4
20459 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[C]], align 4
20460 // CHECK11-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
20461 // CHECK11-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32*
20462 // CHECK11-NEXT:    store i32 [[TMP1]], i32* [[TMP6]], align 4
20463 // CHECK11-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
20464 // CHECK11-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32*
20465 // CHECK11-NEXT:    store i32 [[TMP1]], i32* [[TMP8]], align 4
20466 // CHECK11-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
20467 // CHECK11-NEXT:    store i8* null, i8** [[TMP9]], align 4
20468 // CHECK11-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
20469 // CHECK11-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i32**
20470 // CHECK11-NEXT:    store i32* [[TMP2]], i32** [[TMP11]], align 4
20471 // CHECK11-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
20472 // CHECK11-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32**
20473 // CHECK11-NEXT:    store i32* [[TMP2]], i32** [[TMP13]], align 4
20474 // CHECK11-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
20475 // CHECK11-NEXT:    store i8* null, i8** [[TMP14]], align 4
20476 // CHECK11-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
20477 // CHECK11-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32**
20478 // CHECK11-NEXT:    store i32* [[TMP3]], i32** [[TMP16]], align 4
20479 // CHECK11-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
20480 // CHECK11-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32**
20481 // CHECK11-NEXT:    store i32* [[TMP3]], i32** [[TMP18]], align 4
20482 // CHECK11-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
20483 // CHECK11-NEXT:    store i8* null, i8** [[TMP19]], align 4
20484 // CHECK11-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
20485 // CHECK11-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32**
20486 // CHECK11-NEXT:    store i32* [[TMP4]], i32** [[TMP21]], align 4
20487 // CHECK11-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
20488 // CHECK11-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32**
20489 // CHECK11-NEXT:    store i32* [[TMP4]], i32** [[TMP23]], align 4
20490 // CHECK11-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
20491 // CHECK11-NEXT:    store i8* null, i8** [[TMP24]], align 4
20492 // CHECK11-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
20493 // CHECK11-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
20494 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[N]], align 4
20495 // CHECK11-NEXT:    store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4
20496 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
20497 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0
20498 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
20499 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
20500 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
20501 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
20502 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP29]], 1
20503 // CHECK11-NEXT:    [[TMP30:%.*]] = zext i32 [[ADD]] to i64
20504 // CHECK11-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP30]])
20505 // CHECK11-NEXT:    [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l42.region_id, i32 4, i8** [[TMP25]], i8** [[TMP26]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.28, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.29, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
20506 // CHECK11-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
20507 // CHECK11-NEXT:    br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
20508 // CHECK11:       omp_offload.failed:
20509 // CHECK11-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l42(i32 [[TMP1]], i32* [[TMP2]], i32* [[TMP3]], i32* [[TMP4]]) #[[ATTR2]]
20510 // CHECK11-NEXT:    br label [[OMP_OFFLOAD_CONT]]
20511 // CHECK11:       omp_offload.cont:
20512 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[N]], align 4
20513 // CHECK11-NEXT:    store i32 [[TMP33]], i32* [[N_CASTED3]], align 4
20514 // CHECK11-NEXT:    [[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4
20515 // CHECK11-NEXT:    [[TMP35:%.*]] = load i32*, i32** [[A]], align 4
20516 // CHECK11-NEXT:    [[TMP36:%.*]] = load i32*, i32** [[B]], align 4
20517 // CHECK11-NEXT:    [[TMP37:%.*]] = load i32*, i32** [[C]], align 4
20518 // CHECK11-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
20519 // CHECK11-NEXT:    [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32*
20520 // CHECK11-NEXT:    store i32 [[TMP34]], i32* [[TMP39]], align 4
20521 // CHECK11-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
20522 // CHECK11-NEXT:    [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32*
20523 // CHECK11-NEXT:    store i32 [[TMP34]], i32* [[TMP41]], align 4
20524 // CHECK11-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0
20525 // CHECK11-NEXT:    store i8* null, i8** [[TMP42]], align 4
20526 // CHECK11-NEXT:    [[TMP43:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1
20527 // CHECK11-NEXT:    [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32**
20528 // CHECK11-NEXT:    store i32* [[TMP35]], i32** [[TMP44]], align 4
20529 // CHECK11-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1
20530 // CHECK11-NEXT:    [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32**
20531 // CHECK11-NEXT:    store i32* [[TMP35]], i32** [[TMP46]], align 4
20532 // CHECK11-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1
20533 // CHECK11-NEXT:    store i8* null, i8** [[TMP47]], align 4
20534 // CHECK11-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2
20535 // CHECK11-NEXT:    [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32**
20536 // CHECK11-NEXT:    store i32* [[TMP36]], i32** [[TMP49]], align 4
20537 // CHECK11-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2
20538 // CHECK11-NEXT:    [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32**
20539 // CHECK11-NEXT:    store i32* [[TMP36]], i32** [[TMP51]], align 4
20540 // CHECK11-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2
20541 // CHECK11-NEXT:    store i8* null, i8** [[TMP52]], align 4
20542 // CHECK11-NEXT:    [[TMP53:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 3
20543 // CHECK11-NEXT:    [[TMP54:%.*]] = bitcast i8** [[TMP53]] to i32**
20544 // CHECK11-NEXT:    store i32* [[TMP37]], i32** [[TMP54]], align 4
20545 // CHECK11-NEXT:    [[TMP55:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 3
20546 // CHECK11-NEXT:    [[TMP56:%.*]] = bitcast i8** [[TMP55]] to i32**
20547 // CHECK11-NEXT:    store i32* [[TMP37]], i32** [[TMP56]], align 4
20548 // CHECK11-NEXT:    [[TMP57:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 3
20549 // CHECK11-NEXT:    store i8* null, i8** [[TMP57]], align 4
20550 // CHECK11-NEXT:    [[TMP58:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
20551 // CHECK11-NEXT:    [[TMP59:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
20552 // CHECK11-NEXT:    [[TMP60:%.*]] = load i32, i32* [[N]], align 4
20553 // CHECK11-NEXT:    store i32 [[TMP60]], i32* [[DOTCAPTURE_EXPR_8]], align 4
20554 // CHECK11-NEXT:    [[TMP61:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_8]], align 4
20555 // CHECK11-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP61]], 0
20556 // CHECK11-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
20557 // CHECK11-NEXT:    [[SUB12:%.*]] = sub nsw i32 [[DIV11]], 1
20558 // CHECK11-NEXT:    store i32 [[SUB12]], i32* [[DOTCAPTURE_EXPR_9]], align 4
20559 // CHECK11-NEXT:    [[TMP62:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4
20560 // CHECK11-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP62]], 1
20561 // CHECK11-NEXT:    [[TMP63:%.*]] = zext i32 [[ADD13]] to i64
20562 // CHECK11-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP63]])
20563 // CHECK11-NEXT:    [[TMP64:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l50.region_id, i32 4, i8** [[TMP58]], i8** [[TMP59]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.32, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.33, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
20564 // CHECK11-NEXT:    [[TMP65:%.*]] = icmp ne i32 [[TMP64]], 0
20565 // CHECK11-NEXT:    br i1 [[TMP65]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]]
20566 // CHECK11:       omp_offload.failed14:
20567 // CHECK11-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l50(i32 [[TMP34]], i32* [[TMP35]], i32* [[TMP36]], i32* [[TMP37]]) #[[ATTR2]]
20568 // CHECK11-NEXT:    br label [[OMP_OFFLOAD_CONT15]]
20569 // CHECK11:       omp_offload.cont15:
20570 // CHECK11-NEXT:    [[TMP66:%.*]] = load i32, i32* [[CH]], align 4
20571 // CHECK11-NEXT:    store i32 [[TMP66]], i32* [[CH_CASTED]], align 4
20572 // CHECK11-NEXT:    [[TMP67:%.*]] = load i32, i32* [[CH_CASTED]], align 4
20573 // CHECK11-NEXT:    [[TMP68:%.*]] = load i32, i32* [[N]], align 4
20574 // CHECK11-NEXT:    store i32 [[TMP68]], i32* [[N_CASTED16]], align 4
20575 // CHECK11-NEXT:    [[TMP69:%.*]] = load i32, i32* [[N_CASTED16]], align 4
20576 // CHECK11-NEXT:    [[TMP70:%.*]] = load i32*, i32** [[A]], align 4
20577 // CHECK11-NEXT:    [[TMP71:%.*]] = load i32*, i32** [[B]], align 4
20578 // CHECK11-NEXT:    [[TMP72:%.*]] = load i32*, i32** [[C]], align 4
20579 // CHECK11-NEXT:    [[TMP73:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
20580 // CHECK11-NEXT:    [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32*
20581 // CHECK11-NEXT:    store i32 [[TMP67]], i32* [[TMP74]], align 4
20582 // CHECK11-NEXT:    [[TMP75:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
20583 // CHECK11-NEXT:    [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i32*
20584 // CHECK11-NEXT:    store i32 [[TMP67]], i32* [[TMP76]], align 4
20585 // CHECK11-NEXT:    [[TMP77:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 0
20586 // CHECK11-NEXT:    store i8* null, i8** [[TMP77]], align 4
20587 // CHECK11-NEXT:    [[TMP78:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 1
20588 // CHECK11-NEXT:    [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32*
20589 // CHECK11-NEXT:    store i32 [[TMP69]], i32* [[TMP79]], align 4
20590 // CHECK11-NEXT:    [[TMP80:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 1
20591 // CHECK11-NEXT:    [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i32*
20592 // CHECK11-NEXT:    store i32 [[TMP69]], i32* [[TMP81]], align 4
20593 // CHECK11-NEXT:    [[TMP82:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 1
20594 // CHECK11-NEXT:    store i8* null, i8** [[TMP82]], align 4
20595 // CHECK11-NEXT:    [[TMP83:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 2
20596 // CHECK11-NEXT:    [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32**
20597 // CHECK11-NEXT:    store i32* [[TMP70]], i32** [[TMP84]], align 4
20598 // CHECK11-NEXT:    [[TMP85:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 2
20599 // CHECK11-NEXT:    [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i32**
20600 // CHECK11-NEXT:    store i32* [[TMP70]], i32** [[TMP86]], align 4
20601 // CHECK11-NEXT:    [[TMP87:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 2
20602 // CHECK11-NEXT:    store i8* null, i8** [[TMP87]], align 4
20603 // CHECK11-NEXT:    [[TMP88:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 3
20604 // CHECK11-NEXT:    [[TMP89:%.*]] = bitcast i8** [[TMP88]] to i32**
20605 // CHECK11-NEXT:    store i32* [[TMP71]], i32** [[TMP89]], align 4
20606 // CHECK11-NEXT:    [[TMP90:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 3
20607 // CHECK11-NEXT:    [[TMP91:%.*]] = bitcast i8** [[TMP90]] to i32**
20608 // CHECK11-NEXT:    store i32* [[TMP71]], i32** [[TMP91]], align 4
20609 // CHECK11-NEXT:    [[TMP92:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 3
20610 // CHECK11-NEXT:    store i8* null, i8** [[TMP92]], align 4
20611 // CHECK11-NEXT:    [[TMP93:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 4
20612 // CHECK11-NEXT:    [[TMP94:%.*]] = bitcast i8** [[TMP93]] to i32**
20613 // CHECK11-NEXT:    store i32* [[TMP72]], i32** [[TMP94]], align 4
20614 // CHECK11-NEXT:    [[TMP95:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 4
20615 // CHECK11-NEXT:    [[TMP96:%.*]] = bitcast i8** [[TMP95]] to i32**
20616 // CHECK11-NEXT:    store i32* [[TMP72]], i32** [[TMP96]], align 4
20617 // CHECK11-NEXT:    [[TMP97:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 4
20618 // CHECK11-NEXT:    store i8* null, i8** [[TMP97]], align 4
20619 // CHECK11-NEXT:    [[TMP98:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
20620 // CHECK11-NEXT:    [[TMP99:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
20621 // CHECK11-NEXT:    [[TMP100:%.*]] = load i32, i32* [[N]], align 4
20622 // CHECK11-NEXT:    store i32 [[TMP100]], i32* [[DOTCAPTURE_EXPR_21]], align 4
20623 // CHECK11-NEXT:    [[TMP101:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_21]], align 4
20624 // CHECK11-NEXT:    [[SUB23:%.*]] = sub nsw i32 [[TMP101]], 0
20625 // CHECK11-NEXT:    [[DIV24:%.*]] = sdiv i32 [[SUB23]], 1
20626 // CHECK11-NEXT:    [[SUB25:%.*]] = sub nsw i32 [[DIV24]], 1
20627 // CHECK11-NEXT:    store i32 [[SUB25]], i32* [[DOTCAPTURE_EXPR_22]], align 4
20628 // CHECK11-NEXT:    [[TMP102:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_22]], align 4
20629 // CHECK11-NEXT:    [[ADD26:%.*]] = add nsw i32 [[TMP102]], 1
20630 // CHECK11-NEXT:    [[TMP103:%.*]] = zext i32 [[ADD26]] to i64
20631 // CHECK11-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP103]])
20632 // CHECK11-NEXT:    [[TMP104:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l58.region_id, i32 5, i8** [[TMP98]], i8** [[TMP99]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.36, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.37, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
20633 // CHECK11-NEXT:    [[TMP105:%.*]] = icmp ne i32 [[TMP104]], 0
20634 // CHECK11-NEXT:    br i1 [[TMP105]], label [[OMP_OFFLOAD_FAILED27:%.*]], label [[OMP_OFFLOAD_CONT28:%.*]]
20635 // CHECK11:       omp_offload.failed27:
20636 // CHECK11-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l58(i32 [[TMP67]], i32 [[TMP69]], i32* [[TMP70]], i32* [[TMP71]], i32* [[TMP72]]) #[[ATTR2]]
20637 // CHECK11-NEXT:    br label [[OMP_OFFLOAD_CONT28]]
20638 // CHECK11:       omp_offload.cont28:
20639 // CHECK11-NEXT:    [[TMP106:%.*]] = load i32, i32* [[N]], align 4
20640 // CHECK11-NEXT:    store i32 [[TMP106]], i32* [[N_CASTED29]], align 4
20641 // CHECK11-NEXT:    [[TMP107:%.*]] = load i32, i32* [[N_CASTED29]], align 4
20642 // CHECK11-NEXT:    [[TMP108:%.*]] = load i32*, i32** [[A]], align 4
20643 // CHECK11-NEXT:    [[TMP109:%.*]] = load i32*, i32** [[B]], align 4
20644 // CHECK11-NEXT:    [[TMP110:%.*]] = load i32*, i32** [[C]], align 4
20645 // CHECK11-NEXT:    [[TMP111:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 0
20646 // CHECK11-NEXT:    [[TMP112:%.*]] = bitcast i8** [[TMP111]] to i32*
20647 // CHECK11-NEXT:    store i32 [[TMP107]], i32* [[TMP112]], align 4
20648 // CHECK11-NEXT:    [[TMP113:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 0
20649 // CHECK11-NEXT:    [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i32*
20650 // CHECK11-NEXT:    store i32 [[TMP107]], i32* [[TMP114]], align 4
20651 // CHECK11-NEXT:    [[TMP115:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 0
20652 // CHECK11-NEXT:    store i8* null, i8** [[TMP115]], align 4
20653 // CHECK11-NEXT:    [[TMP116:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 1
20654 // CHECK11-NEXT:    [[TMP117:%.*]] = bitcast i8** [[TMP116]] to i32**
20655 // CHECK11-NEXT:    store i32* [[TMP108]], i32** [[TMP117]], align 4
20656 // CHECK11-NEXT:    [[TMP118:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 1
20657 // CHECK11-NEXT:    [[TMP119:%.*]] = bitcast i8** [[TMP118]] to i32**
20658 // CHECK11-NEXT:    store i32* [[TMP108]], i32** [[TMP119]], align 4
20659 // CHECK11-NEXT:    [[TMP120:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 1
20660 // CHECK11-NEXT:    store i8* null, i8** [[TMP120]], align 4
20661 // CHECK11-NEXT:    [[TMP121:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 2
20662 // CHECK11-NEXT:    [[TMP122:%.*]] = bitcast i8** [[TMP121]] to i32**
20663 // CHECK11-NEXT:    store i32* [[TMP109]], i32** [[TMP122]], align 4
20664 // CHECK11-NEXT:    [[TMP123:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 2
20665 // CHECK11-NEXT:    [[TMP124:%.*]] = bitcast i8** [[TMP123]] to i32**
20666 // CHECK11-NEXT:    store i32* [[TMP109]], i32** [[TMP124]], align 4
20667 // CHECK11-NEXT:    [[TMP125:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 2
20668 // CHECK11-NEXT:    store i8* null, i8** [[TMP125]], align 4
20669 // CHECK11-NEXT:    [[TMP126:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 3
20670 // CHECK11-NEXT:    [[TMP127:%.*]] = bitcast i8** [[TMP126]] to i32**
20671 // CHECK11-NEXT:    store i32* [[TMP110]], i32** [[TMP127]], align 4
20672 // CHECK11-NEXT:    [[TMP128:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 3
20673 // CHECK11-NEXT:    [[TMP129:%.*]] = bitcast i8** [[TMP128]] to i32**
20674 // CHECK11-NEXT:    store i32* [[TMP110]], i32** [[TMP129]], align 4
20675 // CHECK11-NEXT:    [[TMP130:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 3
20676 // CHECK11-NEXT:    store i8* null, i8** [[TMP130]], align 4
20677 // CHECK11-NEXT:    [[TMP131:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 0
20678 // CHECK11-NEXT:    [[TMP132:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 0
20679 // CHECK11-NEXT:    [[TMP133:%.*]] = load i32, i32* [[N]], align 4
20680 // CHECK11-NEXT:    store i32 [[TMP133]], i32* [[DOTCAPTURE_EXPR_34]], align 4
20681 // CHECK11-NEXT:    [[TMP134:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_34]], align 4
20682 // CHECK11-NEXT:    [[SUB36:%.*]] = sub nsw i32 [[TMP134]], 0
20683 // CHECK11-NEXT:    [[DIV37:%.*]] = sdiv i32 [[SUB36]], 1
20684 // CHECK11-NEXT:    [[SUB38:%.*]] = sub nsw i32 [[DIV37]], 1
20685 // CHECK11-NEXT:    store i32 [[SUB38]], i32* [[DOTCAPTURE_EXPR_35]], align 4
20686 // CHECK11-NEXT:    [[TMP135:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_35]], align 4
20687 // CHECK11-NEXT:    [[ADD39:%.*]] = add nsw i32 [[TMP135]], 1
20688 // CHECK11-NEXT:    [[TMP136:%.*]] = zext i32 [[ADD39]] to i64
20689 // CHECK11-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP136]])
20690 // CHECK11-NEXT:    [[TMP137:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l66.region_id, i32 4, i8** [[TMP131]], i8** [[TMP132]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.40, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.41, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
20691 // CHECK11-NEXT:    [[TMP138:%.*]] = icmp ne i32 [[TMP137]], 0
20692 // CHECK11-NEXT:    br i1 [[TMP138]], label [[OMP_OFFLOAD_FAILED40:%.*]], label [[OMP_OFFLOAD_CONT41:%.*]]
20693 // CHECK11:       omp_offload.failed40:
20694 // CHECK11-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l66(i32 [[TMP107]], i32* [[TMP108]], i32* [[TMP109]], i32* [[TMP110]]) #[[ATTR2]]
20695 // CHECK11-NEXT:    br label [[OMP_OFFLOAD_CONT41]]
20696 // CHECK11:       omp_offload.cont41:
20697 // CHECK11-NEXT:    [[TMP139:%.*]] = load i32, i32* [[CH]], align 4
20698 // CHECK11-NEXT:    store i32 [[TMP139]], i32* [[CH_CASTED42]], align 4
20699 // CHECK11-NEXT:    [[TMP140:%.*]] = load i32, i32* [[CH_CASTED42]], align 4
20700 // CHECK11-NEXT:    [[TMP141:%.*]] = load i32, i32* [[N]], align 4
20701 // CHECK11-NEXT:    store i32 [[TMP141]], i32* [[N_CASTED43]], align 4
20702 // CHECK11-NEXT:    [[TMP142:%.*]] = load i32, i32* [[N_CASTED43]], align 4
20703 // CHECK11-NEXT:    [[TMP143:%.*]] = load i32*, i32** [[A]], align 4
20704 // CHECK11-NEXT:    [[TMP144:%.*]] = load i32*, i32** [[B]], align 4
20705 // CHECK11-NEXT:    [[TMP145:%.*]] = load i32*, i32** [[C]], align 4
20706 // CHECK11-NEXT:    [[TMP146:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 0
20707 // CHECK11-NEXT:    [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i32*
20708 // CHECK11-NEXT:    store i32 [[TMP140]], i32* [[TMP147]], align 4
20709 // CHECK11-NEXT:    [[TMP148:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 0
20710 // CHECK11-NEXT:    [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i32*
20711 // CHECK11-NEXT:    store i32 [[TMP140]], i32* [[TMP149]], align 4
20712 // CHECK11-NEXT:    [[TMP150:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 0
20713 // CHECK11-NEXT:    store i8* null, i8** [[TMP150]], align 4
20714 // CHECK11-NEXT:    [[TMP151:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 1
20715 // CHECK11-NEXT:    [[TMP152:%.*]] = bitcast i8** [[TMP151]] to i32*
20716 // CHECK11-NEXT:    store i32 [[TMP142]], i32* [[TMP152]], align 4
20717 // CHECK11-NEXT:    [[TMP153:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 1
20718 // CHECK11-NEXT:    [[TMP154:%.*]] = bitcast i8** [[TMP153]] to i32*
20719 // CHECK11-NEXT:    store i32 [[TMP142]], i32* [[TMP154]], align 4
20720 // CHECK11-NEXT:    [[TMP155:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 1
20721 // CHECK11-NEXT:    store i8* null, i8** [[TMP155]], align 4
20722 // CHECK11-NEXT:    [[TMP156:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 2
20723 // CHECK11-NEXT:    [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i32**
20724 // CHECK11-NEXT:    store i32* [[TMP143]], i32** [[TMP157]], align 4
20725 // CHECK11-NEXT:    [[TMP158:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 2
20726 // CHECK11-NEXT:    [[TMP159:%.*]] = bitcast i8** [[TMP158]] to i32**
20727 // CHECK11-NEXT:    store i32* [[TMP143]], i32** [[TMP159]], align 4
20728 // CHECK11-NEXT:    [[TMP160:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 2
20729 // CHECK11-NEXT:    store i8* null, i8** [[TMP160]], align 4
20730 // CHECK11-NEXT:    [[TMP161:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 3
20731 // CHECK11-NEXT:    [[TMP162:%.*]] = bitcast i8** [[TMP161]] to i32**
20732 // CHECK11-NEXT:    store i32* [[TMP144]], i32** [[TMP162]], align 4
20733 // CHECK11-NEXT:    [[TMP163:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 3
20734 // CHECK11-NEXT:    [[TMP164:%.*]] = bitcast i8** [[TMP163]] to i32**
20735 // CHECK11-NEXT:    store i32* [[TMP144]], i32** [[TMP164]], align 4
20736 // CHECK11-NEXT:    [[TMP165:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 3
20737 // CHECK11-NEXT:    store i8* null, i8** [[TMP165]], align 4
20738 // CHECK11-NEXT:    [[TMP166:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 4
20739 // CHECK11-NEXT:    [[TMP167:%.*]] = bitcast i8** [[TMP166]] to i32**
20740 // CHECK11-NEXT:    store i32* [[TMP145]], i32** [[TMP167]], align 4
20741 // CHECK11-NEXT:    [[TMP168:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 4
20742 // CHECK11-NEXT:    [[TMP169:%.*]] = bitcast i8** [[TMP168]] to i32**
20743 // CHECK11-NEXT:    store i32* [[TMP145]], i32** [[TMP169]], align 4
20744 // CHECK11-NEXT:    [[TMP170:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 4
20745 // CHECK11-NEXT:    store i8* null, i8** [[TMP170]], align 4
20746 // CHECK11-NEXT:    [[TMP171:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 0
20747 // CHECK11-NEXT:    [[TMP172:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 0
20748 // CHECK11-NEXT:    [[TMP173:%.*]] = load i32, i32* [[N]], align 4
20749 // CHECK11-NEXT:    store i32 [[TMP173]], i32* [[DOTCAPTURE_EXPR_48]], align 4
20750 // CHECK11-NEXT:    [[TMP174:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_48]], align 4
20751 // CHECK11-NEXT:    [[SUB50:%.*]] = sub nsw i32 [[TMP174]], 0
20752 // CHECK11-NEXT:    [[DIV51:%.*]] = sdiv i32 [[SUB50]], 1
20753 // CHECK11-NEXT:    [[SUB52:%.*]] = sub nsw i32 [[DIV51]], 1
20754 // CHECK11-NEXT:    store i32 [[SUB52]], i32* [[DOTCAPTURE_EXPR_49]], align 4
20755 // CHECK11-NEXT:    [[TMP175:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_49]], align 4
20756 // CHECK11-NEXT:    [[ADD53:%.*]] = add nsw i32 [[TMP175]], 1
20757 // CHECK11-NEXT:    [[TMP176:%.*]] = zext i32 [[ADD53]] to i64
20758 // CHECK11-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP176]])
20759 // CHECK11-NEXT:    [[TMP177:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l74.region_id, i32 5, i8** [[TMP171]], i8** [[TMP172]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.44, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.45, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
20760 // CHECK11-NEXT:    [[TMP178:%.*]] = icmp ne i32 [[TMP177]], 0
20761 // CHECK11-NEXT:    br i1 [[TMP178]], label [[OMP_OFFLOAD_FAILED54:%.*]], label [[OMP_OFFLOAD_CONT55:%.*]]
20762 // CHECK11:       omp_offload.failed54:
20763 // CHECK11-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l74(i32 [[TMP140]], i32 [[TMP142]], i32* [[TMP143]], i32* [[TMP144]], i32* [[TMP145]]) #[[ATTR2]]
20764 // CHECK11-NEXT:    br label [[OMP_OFFLOAD_CONT55]]
20765 // CHECK11:       omp_offload.cont55:
20766 // CHECK11-NEXT:    [[TMP179:%.*]] = load i32, i32* [[N]], align 4
20767 // CHECK11-NEXT:    store i32 [[TMP179]], i32* [[N_CASTED56]], align 4
20768 // CHECK11-NEXT:    [[TMP180:%.*]] = load i32, i32* [[N_CASTED56]], align 4
20769 // CHECK11-NEXT:    [[TMP181:%.*]] = load i32*, i32** [[A]], align 4
20770 // CHECK11-NEXT:    [[TMP182:%.*]] = load i32*, i32** [[B]], align 4
20771 // CHECK11-NEXT:    [[TMP183:%.*]] = load i32*, i32** [[C]], align 4
20772 // CHECK11-NEXT:    [[TMP184:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 0
20773 // CHECK11-NEXT:    [[TMP185:%.*]] = bitcast i8** [[TMP184]] to i32*
20774 // CHECK11-NEXT:    store i32 [[TMP180]], i32* [[TMP185]], align 4
20775 // CHECK11-NEXT:    [[TMP186:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS58]], i32 0, i32 0
20776 // CHECK11-NEXT:    [[TMP187:%.*]] = bitcast i8** [[TMP186]] to i32*
20777 // CHECK11-NEXT:    store i32 [[TMP180]], i32* [[TMP187]], align 4
20778 // CHECK11-NEXT:    [[TMP188:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS59]], i32 0, i32 0
20779 // CHECK11-NEXT:    store i8* null, i8** [[TMP188]], align 4
20780 // CHECK11-NEXT:    [[TMP189:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 1
20781 // CHECK11-NEXT:    [[TMP190:%.*]] = bitcast i8** [[TMP189]] to i32**
20782 // CHECK11-NEXT:    store i32* [[TMP181]], i32** [[TMP190]], align 4
20783 // CHECK11-NEXT:    [[TMP191:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS58]], i32 0, i32 1
20784 // CHECK11-NEXT:    [[TMP192:%.*]] = bitcast i8** [[TMP191]] to i32**
20785 // CHECK11-NEXT:    store i32* [[TMP181]], i32** [[TMP192]], align 4
20786 // CHECK11-NEXT:    [[TMP193:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS59]], i32 0, i32 1
20787 // CHECK11-NEXT:    store i8* null, i8** [[TMP193]], align 4
20788 // CHECK11-NEXT:    [[TMP194:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 2
20789 // CHECK11-NEXT:    [[TMP195:%.*]] = bitcast i8** [[TMP194]] to i32**
20790 // CHECK11-NEXT:    store i32* [[TMP182]], i32** [[TMP195]], align 4
20791 // CHECK11-NEXT:    [[TMP196:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS58]], i32 0, i32 2
20792 // CHECK11-NEXT:    [[TMP197:%.*]] = bitcast i8** [[TMP196]] to i32**
20793 // CHECK11-NEXT:    store i32* [[TMP182]], i32** [[TMP197]], align 4
20794 // CHECK11-NEXT:    [[TMP198:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS59]], i32 0, i32 2
20795 // CHECK11-NEXT:    store i8* null, i8** [[TMP198]], align 4
20796 // CHECK11-NEXT:    [[TMP199:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 3
20797 // CHECK11-NEXT:    [[TMP200:%.*]] = bitcast i8** [[TMP199]] to i32**
20798 // CHECK11-NEXT:    store i32* [[TMP183]], i32** [[TMP200]], align 4
20799 // CHECK11-NEXT:    [[TMP201:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS58]], i32 0, i32 3
20800 // CHECK11-NEXT:    [[TMP202:%.*]] = bitcast i8** [[TMP201]] to i32**
20801 // CHECK11-NEXT:    store i32* [[TMP183]], i32** [[TMP202]], align 4
20802 // CHECK11-NEXT:    [[TMP203:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS59]], i32 0, i32 3
20803 // CHECK11-NEXT:    store i8* null, i8** [[TMP203]], align 4
20804 // CHECK11-NEXT:    [[TMP204:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 0
20805 // CHECK11-NEXT:    [[TMP205:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS58]], i32 0, i32 0
20806 // CHECK11-NEXT:    [[TMP206:%.*]] = load i32, i32* [[N]], align 4
20807 // CHECK11-NEXT:    store i32 [[TMP206]], i32* [[DOTCAPTURE_EXPR_61]], align 4
20808 // CHECK11-NEXT:    [[TMP207:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_61]], align 4
20809 // CHECK11-NEXT:    [[SUB63:%.*]] = sub nsw i32 [[TMP207]], 0
20810 // CHECK11-NEXT:    [[DIV64:%.*]] = sdiv i32 [[SUB63]], 1
20811 // CHECK11-NEXT:    [[SUB65:%.*]] = sub nsw i32 [[DIV64]], 1
20812 // CHECK11-NEXT:    store i32 [[SUB65]], i32* [[DOTCAPTURE_EXPR_62]], align 4
20813 // CHECK11-NEXT:    [[TMP208:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_62]], align 4
20814 // CHECK11-NEXT:    [[ADD66:%.*]] = add nsw i32 [[TMP208]], 1
20815 // CHECK11-NEXT:    [[TMP209:%.*]] = zext i32 [[ADD66]] to i64
20816 // CHECK11-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP209]])
20817 // CHECK11-NEXT:    [[TMP210:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l82.region_id, i32 4, i8** [[TMP204]], i8** [[TMP205]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.48, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.49, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
20818 // CHECK11-NEXT:    [[TMP211:%.*]] = icmp ne i32 [[TMP210]], 0
20819 // CHECK11-NEXT:    br i1 [[TMP211]], label [[OMP_OFFLOAD_FAILED67:%.*]], label [[OMP_OFFLOAD_CONT68:%.*]]
20820 // CHECK11:       omp_offload.failed67:
20821 // CHECK11-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l82(i32 [[TMP180]], i32* [[TMP181]], i32* [[TMP182]], i32* [[TMP183]]) #[[ATTR2]]
20822 // CHECK11-NEXT:    br label [[OMP_OFFLOAD_CONT68]]
20823 // CHECK11:       omp_offload.cont68:
20824 // CHECK11-NEXT:    [[TMP212:%.*]] = load i32, i32* [[CH]], align 4
20825 // CHECK11-NEXT:    store i32 [[TMP212]], i32* [[CH_CASTED69]], align 4
20826 // CHECK11-NEXT:    [[TMP213:%.*]] = load i32, i32* [[CH_CASTED69]], align 4
20827 // CHECK11-NEXT:    [[TMP214:%.*]] = load i32, i32* [[N]], align 4
20828 // CHECK11-NEXT:    store i32 [[TMP214]], i32* [[N_CASTED70]], align 4
20829 // CHECK11-NEXT:    [[TMP215:%.*]] = load i32, i32* [[N_CASTED70]], align 4
20830 // CHECK11-NEXT:    [[TMP216:%.*]] = load i32*, i32** [[A]], align 4
20831 // CHECK11-NEXT:    [[TMP217:%.*]] = load i32*, i32** [[B]], align 4
20832 // CHECK11-NEXT:    [[TMP218:%.*]] = load i32*, i32** [[C]], align 4
20833 // CHECK11-NEXT:    [[TMP219:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 0
20834 // CHECK11-NEXT:    [[TMP220:%.*]] = bitcast i8** [[TMP219]] to i32*
20835 // CHECK11-NEXT:    store i32 [[TMP213]], i32* [[TMP220]], align 4
20836 // CHECK11-NEXT:    [[TMP221:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 0
20837 // CHECK11-NEXT:    [[TMP222:%.*]] = bitcast i8** [[TMP221]] to i32*
20838 // CHECK11-NEXT:    store i32 [[TMP213]], i32* [[TMP222]], align 4
20839 // CHECK11-NEXT:    [[TMP223:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS73]], i32 0, i32 0
20840 // CHECK11-NEXT:    store i8* null, i8** [[TMP223]], align 4
20841 // CHECK11-NEXT:    [[TMP224:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 1
20842 // CHECK11-NEXT:    [[TMP225:%.*]] = bitcast i8** [[TMP224]] to i32*
20843 // CHECK11-NEXT:    store i32 [[TMP215]], i32* [[TMP225]], align 4
20844 // CHECK11-NEXT:    [[TMP226:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 1
20845 // CHECK11-NEXT:    [[TMP227:%.*]] = bitcast i8** [[TMP226]] to i32*
20846 // CHECK11-NEXT:    store i32 [[TMP215]], i32* [[TMP227]], align 4
20847 // CHECK11-NEXT:    [[TMP228:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS73]], i32 0, i32 1
20848 // CHECK11-NEXT:    store i8* null, i8** [[TMP228]], align 4
20849 // CHECK11-NEXT:    [[TMP229:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 2
20850 // CHECK11-NEXT:    [[TMP230:%.*]] = bitcast i8** [[TMP229]] to i32**
20851 // CHECK11-NEXT:    store i32* [[TMP216]], i32** [[TMP230]], align 4
20852 // CHECK11-NEXT:    [[TMP231:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 2
20853 // CHECK11-NEXT:    [[TMP232:%.*]] = bitcast i8** [[TMP231]] to i32**
20854 // CHECK11-NEXT:    store i32* [[TMP216]], i32** [[TMP232]], align 4
20855 // CHECK11-NEXT:    [[TMP233:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS73]], i32 0, i32 2
20856 // CHECK11-NEXT:    store i8* null, i8** [[TMP233]], align 4
20857 // CHECK11-NEXT:    [[TMP234:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 3
20858 // CHECK11-NEXT:    [[TMP235:%.*]] = bitcast i8** [[TMP234]] to i32**
20859 // CHECK11-NEXT:    store i32* [[TMP217]], i32** [[TMP235]], align 4
20860 // CHECK11-NEXT:    [[TMP236:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 3
20861 // CHECK11-NEXT:    [[TMP237:%.*]] = bitcast i8** [[TMP236]] to i32**
20862 // CHECK11-NEXT:    store i32* [[TMP217]], i32** [[TMP237]], align 4
20863 // CHECK11-NEXT:    [[TMP238:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS73]], i32 0, i32 3
20864 // CHECK11-NEXT:    store i8* null, i8** [[TMP238]], align 4
20865 // CHECK11-NEXT:    [[TMP239:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 4
20866 // CHECK11-NEXT:    [[TMP240:%.*]] = bitcast i8** [[TMP239]] to i32**
20867 // CHECK11-NEXT:    store i32* [[TMP218]], i32** [[TMP240]], align 4
20868 // CHECK11-NEXT:    [[TMP241:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 4
20869 // CHECK11-NEXT:    [[TMP242:%.*]] = bitcast i8** [[TMP241]] to i32**
20870 // CHECK11-NEXT:    store i32* [[TMP218]], i32** [[TMP242]], align 4
20871 // CHECK11-NEXT:    [[TMP243:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS73]], i32 0, i32 4
20872 // CHECK11-NEXT:    store i8* null, i8** [[TMP243]], align 4
20873 // CHECK11-NEXT:    [[TMP244:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 0
20874 // CHECK11-NEXT:    [[TMP245:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 0
20875 // CHECK11-NEXT:    [[TMP246:%.*]] = load i32, i32* [[N]], align 4
20876 // CHECK11-NEXT:    store i32 [[TMP246]], i32* [[DOTCAPTURE_EXPR_75]], align 4
20877 // CHECK11-NEXT:    [[TMP247:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_75]], align 4
20878 // CHECK11-NEXT:    [[SUB77:%.*]] = sub nsw i32 [[TMP247]], 0
20879 // CHECK11-NEXT:    [[DIV78:%.*]] = sdiv i32 [[SUB77]], 1
20880 // CHECK11-NEXT:    [[SUB79:%.*]] = sub nsw i32 [[DIV78]], 1
20881 // CHECK11-NEXT:    store i32 [[SUB79]], i32* [[DOTCAPTURE_EXPR_76]], align 4
20882 // CHECK11-NEXT:    [[TMP248:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_76]], align 4
20883 // CHECK11-NEXT:    [[ADD80:%.*]] = add nsw i32 [[TMP248]], 1
20884 // CHECK11-NEXT:    [[TMP249:%.*]] = zext i32 [[ADD80]] to i64
20885 // CHECK11-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP249]])
20886 // CHECK11-NEXT:    [[TMP250:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l90.region_id, i32 5, i8** [[TMP244]], i8** [[TMP245]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.52, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.53, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
20887 // CHECK11-NEXT:    [[TMP251:%.*]] = icmp ne i32 [[TMP250]], 0
20888 // CHECK11-NEXT:    br i1 [[TMP251]], label [[OMP_OFFLOAD_FAILED81:%.*]], label [[OMP_OFFLOAD_CONT82:%.*]]
20889 // CHECK11:       omp_offload.failed81:
20890 // CHECK11-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l90(i32 [[TMP213]], i32 [[TMP215]], i32* [[TMP216]], i32* [[TMP217]], i32* [[TMP218]]) #[[ATTR2]]
20891 // CHECK11-NEXT:    br label [[OMP_OFFLOAD_CONT82]]
20892 // CHECK11:       omp_offload.cont82:
20893 // CHECK11-NEXT:    ret i32 0
20894 //
20895 //
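// The CHECK11 lines above exercise the host side of tmain<int>(): for each target region
// (l42 through l90, going by the _l<N> suffixes in the offload entry names) the generated
// code fills 4- or 5-slot base-pointer/pointer/mapper arrays, derives the loop trip count
// from n, pushes it with __kmpc_push_target_tripcount_mapper, attempts the offload via
// __tgt_target_teams_mapper, and calls the host-outlined entry when that returns non-zero.
// A rough sketch of one such sequence, using placeholder names (ident, region_id,
// host_entry) that are not part of the generated checks:
//
//   void *baseptrs[4], *ptrs[4], *mappers[4];          // one slot per captured value
//   int64_t trip_count = ((n - 0) / 1 - 1) + 1;        // folds to n for this 0..n loop
//   __kmpc_push_target_tripcount_mapper(ident, /*DeviceID=*/-1, trip_count);
//   int rc = __tgt_target_teams_mapper(ident, /*DeviceID=*/-1, region_id, /*NumArgs=*/4,
//                                      baseptrs, ptrs, sizes, maptypes, NULL, NULL,
//                                      0, 1);          // trailing args as checked above
//   if (rc != 0)
//     host_entry(n, a, b, c);                          // host fallback for the region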
20896 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l42
20897 // CHECK11-SAME: (i32 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
20898 // CHECK11-NEXT:  entry:
20899 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
20900 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
20901 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 4
20902 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 4
20903 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
20904 // CHECK11-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
20905 // CHECK11-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 4
20906 // CHECK11-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 4
20907 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
20908 // CHECK11-NEXT:    ret void
20909 //
20910 //
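// The l42 target entry just checked only spills its by-value n and the a/b/c pointers into
// stack slots and forks the teams region through __kmpc_fork_teams, passing those addresses
// to .omp_outlined..26 below; all of the loop work happens in the outlined callees.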
20911 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..26
20912 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
20913 // CHECK11-NEXT:  entry:
20914 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
20915 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
20916 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
20917 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
20918 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
20919 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
20920 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
20921 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
20922 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
20923 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
20924 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
20925 // CHECK11-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
20926 // CHECK11-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
20927 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
20928 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
20929 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
20930 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
20931 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
20932 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
20933 // CHECK11-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
20934 // CHECK11-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
20935 // CHECK11-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
20936 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
20937 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
20938 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
20939 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
20940 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
20941 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
20942 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
20943 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
20944 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
20945 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
20946 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
20947 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
20948 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
20949 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
20950 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
20951 // CHECK11:       omp.precond.then:
20952 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
20953 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
20954 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
20955 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
20956 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
20957 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
20958 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
20959 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
20960 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
20961 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
20962 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
20963 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
20964 // CHECK11:       cond.true:
20965 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
20966 // CHECK11-NEXT:    br label [[COND_END:%.*]]
20967 // CHECK11:       cond.false:
20968 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
20969 // CHECK11-NEXT:    br label [[COND_END]]
20970 // CHECK11:       cond.end:
20971 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
20972 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
20973 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
20974 // CHECK11-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
20975 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
20976 // CHECK11:       omp.inner.for.cond:
20977 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !63
20978 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !63
20979 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
20980 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
20981 // CHECK11:       omp.inner.for.body:
20982 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !63
20983 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !63
20984 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32**, i32**, i32**)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], i32** [[TMP1]], i32** [[TMP2]], i32** [[TMP3]]), !llvm.access.group !63
20985 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
20986 // CHECK11:       omp.inner.for.inc:
20987 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !63
20988 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !63
20989 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
20990 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !63
20991 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP64:![0-9]+]]
20992 // CHECK11:       omp.inner.for.end:
20993 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
20994 // CHECK11:       omp.loop.exit:
20995 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
20996 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
20997 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
20998 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
20999 // CHECK11-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
21000 // CHECK11-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
21001 // CHECK11:       .omp.final.then:
21002 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21003 // CHECK11-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
21004 // CHECK11-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
21005 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
21006 // CHECK11-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
21007 // CHECK11-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
21008 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
21009 // CHECK11:       .omp.final.done:
21010 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
21011 // CHECK11:       omp.precond.end:
21012 // CHECK11-NEXT:    ret void
21013 //
21014 //
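// .omp_outlined..26 above is the teams/distribute level of the l42 region: it recomputes the
// 0..n-1 iteration space, obtains a chunk from __kmpc_for_static_init_4 with schedule kind 92
// (distribute static), and hands each chunk's bounds to the parallel worker .omp_outlined..27
// via __kmpc_fork_call. Combined with the worker body checked below (a[i] = b[i] + c[i]),
// this lowering is consistent with a combined construct along the lines of the sketch below;
// the sketch is reconstructed from the checks, not quoted from the test source, and omits any
// clauses (and a possible simd suffix, suggested by the !llvm.access.group tags) the real
// directive may carry.
//
//   #pragma omp target teams distribute parallel for   // real directive may differ
//   for (int i = 0; i < n; ++i)
//     a[i] = b[i] + c[i];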
21015 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..27
21016 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
21017 // CHECK11-NEXT:  entry:
21018 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
21019 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
21020 // CHECK11-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
21021 // CHECK11-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
21022 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
21023 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
21024 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
21025 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
21026 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
21027 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
21028 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
21029 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
21030 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
21031 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
21032 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
21033 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
21034 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
21035 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
21036 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
21037 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
21038 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
21039 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
21040 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
21041 // CHECK11-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
21042 // CHECK11-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
21043 // CHECK11-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
21044 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
21045 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
21046 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
21047 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
21048 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
21049 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
21050 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21051 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
21052 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
21053 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
21054 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
21055 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
21056 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21057 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
21058 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
21059 // CHECK11:       omp.precond.then:
21060 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
21061 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21062 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
21063 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
21064 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
21065 // CHECK11-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
21066 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
21067 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
21068 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
21069 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
21070 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
21071 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
21072 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
21073 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21074 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
21075 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
21076 // CHECK11:       cond.true:
21077 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21078 // CHECK11-NEXT:    br label [[COND_END:%.*]]
21079 // CHECK11:       cond.false:
21080 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
21081 // CHECK11-NEXT:    br label [[COND_END]]
21082 // CHECK11:       cond.end:
21083 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
21084 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
21085 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
21086 // CHECK11-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
21087 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
21088 // CHECK11:       omp.inner.for.cond:
21089 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !66
21090 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !66
21091 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
21092 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
21093 // CHECK11:       omp.inner.for.body:
21094 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !66
21095 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
21096 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
21097 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !66
21098 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[TMP2]], align 4, !llvm.access.group !66
21099 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !66
21100 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i32 [[TMP21]]
21101 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !66
21102 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[TMP3]], align 4, !llvm.access.group !66
21103 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !66
21104 // CHECK11-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i32 [[TMP24]]
21105 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4, !llvm.access.group !66
21106 // CHECK11-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP22]], [[TMP25]]
21107 // CHECK11-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP1]], align 4, !llvm.access.group !66
21108 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !66
21109 // CHECK11-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i32 [[TMP27]]
21110 // CHECK11-NEXT:    store i32 [[ADD7]], i32* [[ARRAYIDX8]], align 4, !llvm.access.group !66
21111 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
21112 // CHECK11:       omp.body.continue:
21113 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
21114 // CHECK11:       omp.inner.for.inc:
21115 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !66
21116 // CHECK11-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP28]], 1
21117 // CHECK11-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !66
21118 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP67:![0-9]+]]
21119 // CHECK11:       omp.inner.for.end:
21120 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
21121 // CHECK11:       omp.loop.exit:
21122 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
21123 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
21124 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
21125 // CHECK11-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
21126 // CHECK11-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
21127 // CHECK11-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
21128 // CHECK11:       .omp.final.then:
21129 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21130 // CHECK11-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
21131 // CHECK11-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
21132 // CHECK11-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
21133 // CHECK11-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
21134 // CHECK11-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
21135 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
21136 // CHECK11:       .omp.final.done:
21137 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
21138 // CHECK11:       omp.precond.end:
21139 // CHECK11-NEXT:    ret void
21140 //
21141 //
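// .omp_outlined..27 above is the parallel-for worker for l42: it clamps its static
// (schedule kind 34) bounds to the distribute chunk handed in through .previous.lb. and
// .previous.ub., runs the body a[i] = b[i] + c[i] under the !llvm.access.group tag, and on
// the last chunk stores the post-loop value of the privatized counter i.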
21142 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l50
21143 // CHECK11-SAME: (i32 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
21144 // CHECK11-NEXT:  entry:
21145 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
21146 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
21147 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 4
21148 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 4
21149 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
21150 // CHECK11-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
21151 // CHECK11-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 4
21152 // CHECK11-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 4
21153 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
21154 // CHECK11-NEXT:    ret void
21155 //
21156 //
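// The checks that follow for .omp_outlined..30 mirror .omp_outlined..26 for the l50 region
// (same distribute setup with schedule kind 92, forwarding chunk bounds via
// __kmpc_fork_call), with .omp_outlined..31 as the corresponding parallel-for worker; within
// the lines shown here only the outlined-function numbers and the access-group/loop metadata
// IDs differ.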
21157 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..30
21158 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
21159 // CHECK11-NEXT:  entry:
21160 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
21161 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
21162 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
21163 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
21164 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
21165 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
21166 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
21167 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
21168 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
21169 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
21170 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
21171 // CHECK11-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
21172 // CHECK11-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
21173 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
21174 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
21175 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
21176 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
21177 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
21178 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
21179 // CHECK11-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
21180 // CHECK11-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
21181 // CHECK11-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
21182 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
21183 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
21184 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
21185 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
21186 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
21187 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
21188 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21189 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
21190 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
21191 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
21192 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
21193 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
21194 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21195 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
21196 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
21197 // CHECK11:       omp.precond.then:
21198 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
21199 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21200 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
21201 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
21202 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
21203 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
21204 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
21205 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
21206 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
21207 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21208 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
21209 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
21210 // CHECK11:       cond.true:
21211 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21212 // CHECK11-NEXT:    br label [[COND_END:%.*]]
21213 // CHECK11:       cond.false:
21214 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
21215 // CHECK11-NEXT:    br label [[COND_END]]
21216 // CHECK11:       cond.end:
21217 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
21218 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
21219 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
21220 // CHECK11-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
21221 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
21222 // CHECK11:       omp.inner.for.cond:
21223 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !69
21224 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !69
21225 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
21226 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
21227 // CHECK11:       omp.inner.for.body:
21228 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !69
21229 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !69
21230 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32**, i32**, i32**)* @.omp_outlined..31 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], i32** [[TMP1]], i32** [[TMP2]], i32** [[TMP3]]), !llvm.access.group !69
21231 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
21232 // CHECK11:       omp.inner.for.inc:
21233 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !69
21234 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !69
21235 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
21236 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !69
21237 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP70:![0-9]+]]
21238 // CHECK11:       omp.inner.for.end:
21239 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
21240 // CHECK11:       omp.loop.exit:
21241 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
21242 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
21243 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
21244 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
21245 // CHECK11-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
21246 // CHECK11-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
21247 // CHECK11:       .omp.final.then:
21248 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21249 // CHECK11-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
21250 // CHECK11-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
21251 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
21252 // CHECK11-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
21253 // CHECK11-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
21254 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
21255 // CHECK11:       .omp.final.done:
21256 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
21257 // CHECK11:       omp.precond.end:
21258 // CHECK11-NEXT:    ret void
21259 //
21260 //
21261 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..31
21262 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
21263 // CHECK11-NEXT:  entry:
21264 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
21265 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
21266 // CHECK11-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
21267 // CHECK11-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
21268 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
21269 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
21270 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
21271 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
21272 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
21273 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
21274 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
21275 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
21276 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
21277 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
21278 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
21279 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
21280 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
21281 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
21282 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
21283 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
21284 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
21285 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
21286 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
21287 // CHECK11-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
21288 // CHECK11-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
21289 // CHECK11-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
21290 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
21291 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
21292 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
21293 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
21294 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
21295 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
21296 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21297 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
21298 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
21299 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
21300 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
21301 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
21302 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21303 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
21304 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
21305 // CHECK11:       omp.precond.then:
21306 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
21307 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21308 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
21309 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
21310 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
21311 // CHECK11-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
21312 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
21313 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
21314 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
21315 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
21316 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
21317 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
21318 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
21319 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21320 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
21321 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
21322 // CHECK11:       cond.true:
21323 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21324 // CHECK11-NEXT:    br label [[COND_END:%.*]]
21325 // CHECK11:       cond.false:
21326 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
21327 // CHECK11-NEXT:    br label [[COND_END]]
21328 // CHECK11:       cond.end:
21329 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
21330 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
21331 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
21332 // CHECK11-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
21333 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
21334 // CHECK11:       omp.inner.for.cond:
21335 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !72
21336 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !72
21337 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
21338 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
21339 // CHECK11:       omp.inner.for.body:
21340 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !72
21341 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
21342 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
21343 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !72
21344 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[TMP2]], align 4, !llvm.access.group !72
21345 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !72
21346 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i32 [[TMP21]]
21347 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !72
21348 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[TMP3]], align 4, !llvm.access.group !72
21349 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !72
21350 // CHECK11-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i32 [[TMP24]]
21351 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4, !llvm.access.group !72
21352 // CHECK11-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP22]], [[TMP25]]
21353 // CHECK11-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP1]], align 4, !llvm.access.group !72
21354 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !72
21355 // CHECK11-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i32 [[TMP27]]
21356 // CHECK11-NEXT:    store i32 [[ADD7]], i32* [[ARRAYIDX8]], align 4, !llvm.access.group !72
21357 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
21358 // CHECK11:       omp.body.continue:
21359 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
21360 // CHECK11:       omp.inner.for.inc:
21361 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !72
21362 // CHECK11-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP28]], 1
21363 // CHECK11-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !72
21364 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP73:![0-9]+]]
21365 // CHECK11:       omp.inner.for.end:
21366 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
21367 // CHECK11:       omp.loop.exit:
21368 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
21369 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
21370 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
21371 // CHECK11-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
21372 // CHECK11-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
21373 // CHECK11-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
21374 // CHECK11:       .omp.final.then:
21375 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21376 // CHECK11-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
21377 // CHECK11-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
21378 // CHECK11-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
21379 // CHECK11-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
21380 // CHECK11-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
21381 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
21382 // CHECK11:       .omp.final.done:
21383 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
21384 // CHECK11:       omp.precond.end:
21385 // CHECK11-NEXT:    ret void
21386 //
21387 //
21388 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l58
21389 // CHECK11-SAME: (i32 [[CH:%.*]], i32 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
21390 // CHECK11-NEXT:  entry:
21391 // CHECK11-NEXT:    [[CH_ADDR:%.*]] = alloca i32, align 4
21392 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
21393 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
21394 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 4
21395 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 4
21396 // CHECK11-NEXT:    store i32 [[CH]], i32* [[CH_ADDR]], align 4
21397 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
21398 // CHECK11-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
21399 // CHECK11-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 4
21400 // CHECK11-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 4
21401 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..34 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
21402 // CHECK11-NEXT:    ret void
21403 //
21404 //
21405 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..34
21406 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
21407 // CHECK11-NEXT:  entry:
21408 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
21409 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
21410 // CHECK11-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 4
21411 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
21412 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
21413 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
21414 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
21415 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
21416 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
21417 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
21418 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
21419 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
21420 // CHECK11-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
21421 // CHECK11-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
21422 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
21423 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
21424 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
21425 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
21426 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
21427 // CHECK11-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 4
21428 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
21429 // CHECK11-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
21430 // CHECK11-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
21431 // CHECK11-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
21432 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
21433 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
21434 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
21435 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
21436 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
21437 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP1]], align 4
21438 // CHECK11-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
21439 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21440 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP6]], 0
21441 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
21442 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
21443 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
21444 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
21445 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21446 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP7]]
21447 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
21448 // CHECK11:       omp.precond.then:
21449 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
21450 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21451 // CHECK11-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_COMB_UB]], align 4
21452 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
21453 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
21454 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP0]], align 4
21455 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
21456 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
21457 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP9]])
21458 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
21459 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21460 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
21461 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
21462 // CHECK11:       cond.true:
21463 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21464 // CHECK11-NEXT:    br label [[COND_END:%.*]]
21465 // CHECK11:       cond.false:
21466 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
21467 // CHECK11-NEXT:    br label [[COND_END]]
21468 // CHECK11:       cond.end:
21469 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
21470 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
21471 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
21472 // CHECK11-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
21473 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
21474 // CHECK11:       omp.inner.for.cond:
21475 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !75
21476 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !75
21477 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], 1
21478 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP17]], [[ADD]]
21479 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
21480 // CHECK11:       omp.inner.for.body:
21481 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !75
21482 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !75
21483 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32**, i32**, i32**)* @.omp_outlined..35 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], i32** [[TMP2]], i32** [[TMP3]], i32** [[TMP4]]), !llvm.access.group !75
21484 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
21485 // CHECK11:       omp.inner.for.inc:
21486 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !75
21487 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !75
21488 // CHECK11-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
21489 // CHECK11-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !75
21490 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !75
21491 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !75
21492 // CHECK11-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
21493 // CHECK11-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !75
21494 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !75
21495 // CHECK11-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !75
21496 // CHECK11-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
21497 // CHECK11-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !75
21498 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !75
21499 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !75
21500 // CHECK11-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[TMP27]], [[TMP28]]
21501 // CHECK11-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
21502 // CHECK11:       cond.true10:
21503 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !75
21504 // CHECK11-NEXT:    br label [[COND_END12:%.*]]
21505 // CHECK11:       cond.false11:
21506 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !75
21507 // CHECK11-NEXT:    br label [[COND_END12]]
21508 // CHECK11:       cond.end12:
21509 // CHECK11-NEXT:    [[COND13:%.*]] = phi i32 [ [[TMP29]], [[COND_TRUE10]] ], [ [[TMP30]], [[COND_FALSE11]] ]
21510 // CHECK11-NEXT:    store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !75
21511 // CHECK11-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !75
21512 // CHECK11-NEXT:    store i32 [[TMP31]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !75
21513 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP76:![0-9]+]]
21514 // CHECK11:       omp.inner.for.end:
21515 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
21516 // CHECK11:       omp.loop.exit:
21517 // CHECK11-NEXT:    [[TMP32:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
21518 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[TMP32]], align 4
21519 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP33]])
21520 // CHECK11-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
21521 // CHECK11-NEXT:    [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
21522 // CHECK11-NEXT:    br i1 [[TMP35]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
21523 // CHECK11:       .omp.final.then:
21524 // CHECK11-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21525 // CHECK11-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP36]], 0
21526 // CHECK11-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
21527 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV15]], 1
21528 // CHECK11-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL]]
21529 // CHECK11-NEXT:    store i32 [[ADD16]], i32* [[I3]], align 4
21530 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
21531 // CHECK11:       .omp.final.done:
21532 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
21533 // CHECK11:       omp.precond.end:
21534 // CHECK11-NEXT:    ret void
21535 //
21536 //
21537 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..35
21538 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
21539 // CHECK11-NEXT:  entry:
21540 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
21541 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
21542 // CHECK11-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
21543 // CHECK11-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
21544 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
21545 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
21546 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
21547 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
21548 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
21549 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
21550 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
21551 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
21552 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
21553 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
21554 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
21555 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
21556 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
21557 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
21558 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
21559 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
21560 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
21561 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
21562 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
21563 // CHECK11-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
21564 // CHECK11-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
21565 // CHECK11-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
21566 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
21567 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
21568 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
21569 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
21570 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
21571 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
21572 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21573 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
21574 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
21575 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
21576 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
21577 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
21578 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21579 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
21580 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
21581 // CHECK11:       omp.precond.then:
21582 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
21583 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21584 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
21585 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
21586 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
21587 // CHECK11-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
21588 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
21589 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
21590 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
21591 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
21592 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
21593 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
21594 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
21595 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21596 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
21597 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
21598 // CHECK11:       cond.true:
21599 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21600 // CHECK11-NEXT:    br label [[COND_END:%.*]]
21601 // CHECK11:       cond.false:
21602 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
21603 // CHECK11-NEXT:    br label [[COND_END]]
21604 // CHECK11:       cond.end:
21605 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
21606 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
21607 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
21608 // CHECK11-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
21609 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
21610 // CHECK11:       omp.inner.for.cond:
21611 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !78
21612 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !78
21613 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
21614 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
21615 // CHECK11:       omp.inner.for.body:
21616 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !78
21617 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
21618 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
21619 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !78
21620 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[TMP2]], align 4, !llvm.access.group !78
21621 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !78
21622 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i32 [[TMP21]]
21623 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !78
21624 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[TMP3]], align 4, !llvm.access.group !78
21625 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !78
21626 // CHECK11-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i32 [[TMP24]]
21627 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4, !llvm.access.group !78
21628 // CHECK11-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP22]], [[TMP25]]
21629 // CHECK11-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP1]], align 4, !llvm.access.group !78
21630 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !78
21631 // CHECK11-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i32 [[TMP27]]
21632 // CHECK11-NEXT:    store i32 [[ADD7]], i32* [[ARRAYIDX8]], align 4, !llvm.access.group !78
21633 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
21634 // CHECK11:       omp.body.continue:
21635 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
21636 // CHECK11:       omp.inner.for.inc:
21637 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !78
21638 // CHECK11-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP28]], 1
21639 // CHECK11-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !78
21640 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP79:![0-9]+]]
21641 // CHECK11:       omp.inner.for.end:
21642 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
21643 // CHECK11:       omp.loop.exit:
21644 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
21645 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
21646 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
21647 // CHECK11-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
21648 // CHECK11-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
21649 // CHECK11-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
21650 // CHECK11:       .omp.final.then:
21651 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21652 // CHECK11-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
21653 // CHECK11-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
21654 // CHECK11-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
21655 // CHECK11-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
21656 // CHECK11-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
21657 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
21658 // CHECK11:       .omp.final.done:
21659 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
21660 // CHECK11:       omp.precond.end:
21661 // CHECK11-NEXT:    ret void
21662 //
21663 //
21664 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l66
21665 // CHECK11-SAME: (i32 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
21666 // CHECK11-NEXT:  entry:
21667 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
21668 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
21669 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 4
21670 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 4
21671 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
21672 // CHECK11-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
21673 // CHECK11-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 4
21674 // CHECK11-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 4
21675 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..38 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
21676 // CHECK11-NEXT:    ret void
21677 //
21678 //
21679 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..38
21680 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
21681 // CHECK11-NEXT:  entry:
21682 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
21683 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
21684 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
21685 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
21686 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
21687 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
21688 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
21689 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
21690 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
21691 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
21692 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
21693 // CHECK11-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
21694 // CHECK11-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
21695 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
21696 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
21697 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
21698 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
21699 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
21700 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
21701 // CHECK11-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
21702 // CHECK11-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
21703 // CHECK11-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
21704 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
21705 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
21706 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
21707 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
21708 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
21709 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
21710 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21711 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
21712 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
21713 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
21714 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
21715 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
21716 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21717 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
21718 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
21719 // CHECK11:       omp.precond.then:
21720 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
21721 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21722 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
21723 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
21724 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
21725 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
21726 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
21727 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
21728 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
21729 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21730 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
21731 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
21732 // CHECK11:       cond.true:
21733 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21734 // CHECK11-NEXT:    br label [[COND_END:%.*]]
21735 // CHECK11:       cond.false:
21736 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
21737 // CHECK11-NEXT:    br label [[COND_END]]
21738 // CHECK11:       cond.end:
21739 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
21740 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
21741 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
21742 // CHECK11-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
21743 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
21744 // CHECK11:       omp.inner.for.cond:
21745 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !81
21746 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !81
21747 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
21748 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
21749 // CHECK11:       omp.inner.for.body:
21750 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !81
21751 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !81
21752 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32**, i32**, i32**)* @.omp_outlined..39 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], i32** [[TMP1]], i32** [[TMP2]], i32** [[TMP3]]), !llvm.access.group !81
21753 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
21754 // CHECK11:       omp.inner.for.inc:
21755 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !81
21756 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !81
21757 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
21758 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !81
21759 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP82:![0-9]+]]
21760 // CHECK11:       omp.inner.for.end:
21761 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
21762 // CHECK11:       omp.loop.exit:
21763 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
21764 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
21765 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
21766 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
21767 // CHECK11-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
21768 // CHECK11-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
21769 // CHECK11:       .omp.final.then:
21770 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21771 // CHECK11-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
21772 // CHECK11-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
21773 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
21774 // CHECK11-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
21775 // CHECK11-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
21776 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
21777 // CHECK11:       .omp.final.done:
21778 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
21779 // CHECK11:       omp.precond.end:
21780 // CHECK11-NEXT:    ret void
21781 //
21782 //
21783 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..39
21784 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
21785 // CHECK11-NEXT:  entry:
21786 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
21787 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
21788 // CHECK11-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
21789 // CHECK11-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
21790 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
21791 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
21792 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
21793 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
21794 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
21795 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
21796 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
21797 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
21798 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
21799 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
21800 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
21801 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
21802 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
21803 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
21804 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
21805 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
21806 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
21807 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
21808 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
21809 // CHECK11-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
21810 // CHECK11-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
21811 // CHECK11-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
21812 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
21813 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
21814 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
21815 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
21816 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
21817 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
21818 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21819 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
21820 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
21821 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
21822 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
21823 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
21824 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21825 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
21826 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
21827 // CHECK11:       omp.precond.then:
21828 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
21829 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21830 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
21831 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
21832 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
21833 // CHECK11-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
21834 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
21835 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
21836 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
21837 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
21838 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
21839 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
21840 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
21841 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21842 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
21843 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
21844 // CHECK11:       cond.true:
21845 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21846 // CHECK11-NEXT:    br label [[COND_END:%.*]]
21847 // CHECK11:       cond.false:
21848 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
21849 // CHECK11-NEXT:    br label [[COND_END]]
21850 // CHECK11:       cond.end:
21851 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
21852 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
21853 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
21854 // CHECK11-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
21855 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
21856 // CHECK11:       omp.inner.for.cond:
21857 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !84
21858 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !84
21859 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
21860 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
21861 // CHECK11:       omp.inner.for.body:
21862 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !84
21863 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
21864 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
21865 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !84
21866 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[TMP2]], align 4, !llvm.access.group !84
21867 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !84
21868 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i32 [[TMP21]]
21869 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !84
21870 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[TMP3]], align 4, !llvm.access.group !84
21871 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !84
21872 // CHECK11-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i32 [[TMP24]]
21873 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4, !llvm.access.group !84
21874 // CHECK11-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP22]], [[TMP25]]
21875 // CHECK11-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP1]], align 4, !llvm.access.group !84
21876 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !84
21877 // CHECK11-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i32 [[TMP27]]
21878 // CHECK11-NEXT:    store i32 [[ADD7]], i32* [[ARRAYIDX8]], align 4, !llvm.access.group !84
21879 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
21880 // CHECK11:       omp.body.continue:
21881 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
21882 // CHECK11:       omp.inner.for.inc:
21883 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !84
21884 // CHECK11-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP28]], 1
21885 // CHECK11-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !84
21886 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP85:![0-9]+]]
21887 // CHECK11:       omp.inner.for.end:
21888 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
21889 // CHECK11:       omp.loop.exit:
21890 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
21891 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
21892 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
21893 // CHECK11-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
21894 // CHECK11-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
21895 // CHECK11-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
21896 // CHECK11:       .omp.final.then:
21897 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21898 // CHECK11-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
21899 // CHECK11-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
21900 // CHECK11-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
21901 // CHECK11-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
21902 // CHECK11-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
21903 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
21904 // CHECK11:       .omp.final.done:
21905 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
21906 // CHECK11:       omp.precond.end:
21907 // CHECK11-NEXT:    ret void
21908 //
21909 //
21910 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l74
21911 // CHECK11-SAME: (i32 [[CH:%.*]], i32 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
21912 // CHECK11-NEXT:  entry:
21913 // CHECK11-NEXT:    [[CH_ADDR:%.*]] = alloca i32, align 4
21914 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
21915 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
21916 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 4
21917 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 4
21918 // CHECK11-NEXT:    store i32 [[CH]], i32* [[CH_ADDR]], align 4
21919 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
21920 // CHECK11-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
21921 // CHECK11-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 4
21922 // CHECK11-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 4
21923 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..42 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
21924 // CHECK11-NEXT:    ret void
21925 //
21926 //
21927 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..42
21928 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
21929 // CHECK11-NEXT:  entry:
21930 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
21931 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
21932 // CHECK11-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 4
21933 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
21934 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
21935 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
21936 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
21937 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
21938 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
21939 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
21940 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
21941 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
21942 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
21943 // CHECK11-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
21944 // CHECK11-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
21945 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
21946 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
21947 // CHECK11-NEXT:    [[I4:%.*]] = alloca i32, align 4
21948 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
21949 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
21950 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
21951 // CHECK11-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 4
21952 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
21953 // CHECK11-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
21954 // CHECK11-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
21955 // CHECK11-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
21956 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
21957 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
21958 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
21959 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
21960 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
21961 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
21962 // CHECK11-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
21963 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
21964 // CHECK11-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
21965 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21966 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
21967 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
21968 // CHECK11-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
21969 // CHECK11-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
21970 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
21971 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
21972 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
21973 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
21974 // CHECK11:       omp.precond.then:
21975 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
21976 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
21977 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
21978 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
21979 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
21980 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
21981 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
21982 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
21983 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
21984 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
21985 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
21986 // CHECK11-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
21987 // CHECK11:       cond.true:
21988 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
21989 // CHECK11-NEXT:    br label [[COND_END:%.*]]
21990 // CHECK11:       cond.false:
21991 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
21992 // CHECK11-NEXT:    br label [[COND_END]]
21993 // CHECK11:       cond.end:
21994 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
21995 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
21996 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
21997 // CHECK11-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
21998 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
21999 // CHECK11:       omp.inner.for.cond:
22000 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !87
22001 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !87
22002 // CHECK11-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
22003 // CHECK11-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
22004 // CHECK11:       omp.inner.for.body:
22005 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !87
22006 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !87
22007 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !87
22008 // CHECK11-NEXT:    store i32 [[TMP21]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !87
22009 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !87
22010 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32**, i32**, i32**, i32)* @.omp_outlined..43 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], i32** [[TMP2]], i32** [[TMP3]], i32** [[TMP4]], i32 [[TMP22]]), !llvm.access.group !87
22011 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
22012 // CHECK11:       omp.inner.for.inc:
22013 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !87
22014 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !87
22015 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
22016 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !87
22017 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP88:![0-9]+]]
22018 // CHECK11:       omp.inner.for.end:
22019 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
22020 // CHECK11:       omp.loop.exit:
22021 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
22022 // CHECK11-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
22023 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
22024 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
22025 // CHECK11-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
22026 // CHECK11-NEXT:    br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
22027 // CHECK11:       .omp.final.then:
22028 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
22029 // CHECK11-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP29]], 0
22030 // CHECK11-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
22031 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
22032 // CHECK11-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
22033 // CHECK11-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
22034 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
22035 // CHECK11:       .omp.final.done:
22036 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
22037 // CHECK11:       omp.precond.end:
22038 // CHECK11-NEXT:    ret void
22039 //
22040 //
22041 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..43
22042 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
22043 // CHECK11-NEXT:  entry:
22044 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
22045 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
22046 // CHECK11-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
22047 // CHECK11-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
22048 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
22049 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
22050 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
22051 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
22052 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
22053 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
22054 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
22055 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
22056 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
22057 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
22058 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
22059 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
22060 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
22061 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
22062 // CHECK11-NEXT:    [[I4:%.*]] = alloca i32, align 4
22063 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
22064 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
22065 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
22066 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
22067 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
22068 // CHECK11-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
22069 // CHECK11-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
22070 // CHECK11-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
22071 // CHECK11-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
22072 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
22073 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
22074 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
22075 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
22076 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
22077 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
22078 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
22079 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
22080 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
22081 // CHECK11-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
22082 // CHECK11-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
22083 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
22084 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
22085 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
22086 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
22087 // CHECK11:       omp.precond.then:
22088 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
22089 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
22090 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
22091 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
22092 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
22093 // CHECK11-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
22094 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
22095 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
22096 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
22097 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
22098 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
22099 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
22100 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP12]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP10]])
22101 // CHECK11-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
22102 // CHECK11:       omp.dispatch.cond:
22103 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
22104 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
22105 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp ugt i32 [[TMP13]], [[TMP14]]
22106 // CHECK11-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
22107 // CHECK11:       cond.true:
22108 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
22109 // CHECK11-NEXT:    br label [[COND_END:%.*]]
22110 // CHECK11:       cond.false:
22111 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
22112 // CHECK11-NEXT:    br label [[COND_END]]
22113 // CHECK11:       cond.end:
22114 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP15]], [[COND_TRUE]] ], [ [[TMP16]], [[COND_FALSE]] ]
22115 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
22116 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
22117 // CHECK11-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
22118 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
22119 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
22120 // CHECK11-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
22121 // CHECK11-NEXT:    br i1 [[CMP6]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
22122 // CHECK11:       omp.dispatch.body:
22123 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
22124 // CHECK11:       omp.inner.for.cond:
22125 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !90
22126 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !90
22127 // CHECK11-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
22128 // CHECK11-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
22129 // CHECK11:       omp.inner.for.body:
22130 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !90
22131 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP22]], 1
22132 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
22133 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !90
22134 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[TMP2]], align 4, !llvm.access.group !90
22135 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !90
22136 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i32 [[TMP24]]
22137 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !90
22138 // CHECK11-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP3]], align 4, !llvm.access.group !90
22139 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !90
22140 // CHECK11-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i32 [[TMP27]]
22141 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32, i32* [[ARRAYIDX8]], align 4, !llvm.access.group !90
22142 // CHECK11-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP25]], [[TMP28]]
22143 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[TMP1]], align 4, !llvm.access.group !90
22144 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !90
22145 // CHECK11-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds i32, i32* [[TMP29]], i32 [[TMP30]]
22146 // CHECK11-NEXT:    store i32 [[ADD9]], i32* [[ARRAYIDX10]], align 4, !llvm.access.group !90
22147 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
22148 // CHECK11:       omp.body.continue:
22149 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
22150 // CHECK11:       omp.inner.for.inc:
22151 // CHECK11-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !90
22152 // CHECK11-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP31]], 1
22153 // CHECK11-NEXT:    store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !90
22154 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP91:![0-9]+]]
22155 // CHECK11:       omp.inner.for.end:
22156 // CHECK11-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
22157 // CHECK11:       omp.dispatch.inc:
22158 // CHECK11-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
22159 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
22160 // CHECK11-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP32]], [[TMP33]]
22161 // CHECK11-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_LB]], align 4
22162 // CHECK11-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
22163 // CHECK11-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
22164 // CHECK11-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP34]], [[TMP35]]
22165 // CHECK11-NEXT:    store i32 [[ADD13]], i32* [[DOTOMP_UB]], align 4
22166 // CHECK11-NEXT:    br label [[OMP_DISPATCH_COND]]
22167 // CHECK11:       omp.dispatch.end:
22168 // CHECK11-NEXT:    [[TMP36:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
22169 // CHECK11-NEXT:    [[TMP37:%.*]] = load i32, i32* [[TMP36]], align 4
22170 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP37]])
22171 // CHECK11-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
22172 // CHECK11-NEXT:    [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
22173 // CHECK11-NEXT:    br i1 [[TMP39]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
22174 // CHECK11:       .omp.final.then:
22175 // CHECK11-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
22176 // CHECK11-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP40]], 0
22177 // CHECK11-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
22178 // CHECK11-NEXT:    [[MUL16:%.*]] = mul nsw i32 [[DIV15]], 1
22179 // CHECK11-NEXT:    [[ADD17:%.*]] = add nsw i32 0, [[MUL16]]
22180 // CHECK11-NEXT:    store i32 [[ADD17]], i32* [[I4]], align 4
22181 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
22182 // CHECK11:       .omp.final.done:
22183 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
22184 // CHECK11:       omp.precond.end:
22185 // CHECK11-NEXT:    ret void
22186 //
22187 //
22188 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l82
22189 // CHECK11-SAME: (i32 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
22190 // CHECK11-NEXT:  entry:
22191 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
22192 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
22193 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 4
22194 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 4
22195 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
22196 // CHECK11-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
22197 // CHECK11-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 4
22198 // CHECK11-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 4
22199 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..46 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
22200 // CHECK11-NEXT:    ret void
22201 //
22202 //
22203 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..46
22204 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
22205 // CHECK11-NEXT:  entry:
22206 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
22207 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
22208 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
22209 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
22210 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
22211 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
22212 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
22213 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
22214 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
22215 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
22216 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
22217 // CHECK11-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
22218 // CHECK11-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
22219 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
22220 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
22221 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
22222 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
22223 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
22224 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
22225 // CHECK11-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
22226 // CHECK11-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
22227 // CHECK11-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
22228 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
22229 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
22230 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
22231 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
22232 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
22233 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
22234 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
22235 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
22236 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
22237 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
22238 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
22239 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
22240 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
22241 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
22242 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
22243 // CHECK11:       omp.precond.then:
22244 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
22245 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
22246 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
22247 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
22248 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
22249 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
22250 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
22251 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
22252 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
22253 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
22254 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
22255 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
22256 // CHECK11:       cond.true:
22257 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
22258 // CHECK11-NEXT:    br label [[COND_END:%.*]]
22259 // CHECK11:       cond.false:
22260 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
22261 // CHECK11-NEXT:    br label [[COND_END]]
22262 // CHECK11:       cond.end:
22263 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
22264 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
22265 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
22266 // CHECK11-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
22267 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
22268 // CHECK11:       omp.inner.for.cond:
22269 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !93
22270 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !93
22271 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
22272 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
22273 // CHECK11:       omp.inner.for.body:
22274 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !93
22275 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !93
22276 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32**, i32**, i32**)* @.omp_outlined..47 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], i32** [[TMP1]], i32** [[TMP2]], i32** [[TMP3]]), !llvm.access.group !93
22277 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
22278 // CHECK11:       omp.inner.for.inc:
22279 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !93
22280 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !93
22281 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
22282 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !93
22283 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP94:![0-9]+]]
22284 // CHECK11:       omp.inner.for.end:
22285 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
22286 // CHECK11:       omp.loop.exit:
22287 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
22288 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
22289 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
22290 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
22291 // CHECK11-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
22292 // CHECK11-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
22293 // CHECK11:       .omp.final.then:
22294 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
22295 // CHECK11-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
22296 // CHECK11-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
22297 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
22298 // CHECK11-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
22299 // CHECK11-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
22300 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
22301 // CHECK11:       .omp.final.done:
22302 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
22303 // CHECK11:       omp.precond.end:
22304 // CHECK11-NEXT:    ret void
22305 //
22306 //
22307 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..47
22308 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
22309 // CHECK11-NEXT:  entry:
22310 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
22311 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
22312 // CHECK11-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
22313 // CHECK11-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
22314 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
22315 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
22316 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
22317 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
22318 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
22319 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
22320 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
22321 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
22322 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
22323 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
22324 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
22325 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
22326 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
22327 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
22328 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
22329 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
22330 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
22331 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
22332 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
22333 // CHECK11-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
22334 // CHECK11-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
22335 // CHECK11-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
22336 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
22337 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
22338 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
22339 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
22340 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
22341 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
22342 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
22343 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
22344 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
22345 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
22346 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
22347 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
22348 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
22349 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
22350 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
22351 // CHECK11:       omp.precond.then:
22352 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
22353 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
22354 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
22355 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
22356 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
22357 // CHECK11-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
22358 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
22359 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
22360 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
22361 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
22362 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
22363 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
22364 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
22365 // CHECK11-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32 35, i32 [[TMP10]], i32 [[TMP11]], i32 1, i32 1)
22366 // CHECK11-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
22367 // CHECK11:       omp.dispatch.cond:
22368 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
22369 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4
22370 // CHECK11-NEXT:    [[TMP16:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP15]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
22371 // CHECK11-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP16]], 0
22372 // CHECK11-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
22373 // CHECK11:       omp.dispatch.body:
22374 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
22375 // CHECK11-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
22376 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
22377 // CHECK11:       omp.inner.for.cond:
22378 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !96
22379 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !96
22380 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
22381 // CHECK11-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
22382 // CHECK11:       omp.inner.for.body:
22383 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !96
22384 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP20]], 1
22385 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
22386 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !96
22387 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[TMP2]], align 4, !llvm.access.group !96
22388 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !96
22389 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP21]], i32 [[TMP22]]
22390 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !96
22391 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32*, i32** [[TMP3]], align 4, !llvm.access.group !96
22392 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !96
22393 // CHECK11-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, i32* [[TMP24]], i32 [[TMP25]]
22394 // CHECK11-NEXT:    [[TMP26:%.*]] = load i32, i32* [[ARRAYIDX5]], align 4, !llvm.access.group !96
22395 // CHECK11-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP23]], [[TMP26]]
22396 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[TMP1]], align 4, !llvm.access.group !96
22397 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !96
22398 // CHECK11-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i32, i32* [[TMP27]], i32 [[TMP28]]
22399 // CHECK11-NEXT:    store i32 [[ADD6]], i32* [[ARRAYIDX7]], align 4, !llvm.access.group !96
22400 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
22401 // CHECK11:       omp.body.continue:
22402 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
22403 // CHECK11:       omp.inner.for.inc:
22404 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !96
22405 // CHECK11-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP29]], 1
22406 // CHECK11-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !96
22407 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP97:![0-9]+]]
22408 // CHECK11:       omp.inner.for.end:
22409 // CHECK11-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
22410 // CHECK11:       omp.dispatch.inc:
22411 // CHECK11-NEXT:    br label [[OMP_DISPATCH_COND]]
22412 // CHECK11:       omp.dispatch.end:
22413 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
22414 // CHECK11-NEXT:    [[TMP31:%.*]] = icmp ne i32 [[TMP30]], 0
22415 // CHECK11-NEXT:    br i1 [[TMP31]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
22416 // CHECK11:       .omp.final.then:
22417 // CHECK11-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
22418 // CHECK11-NEXT:    [[SUB9:%.*]] = sub nsw i32 [[TMP32]], 0
22419 // CHECK11-NEXT:    [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
22420 // CHECK11-NEXT:    [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
22421 // CHECK11-NEXT:    [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
22422 // CHECK11-NEXT:    store i32 [[ADD12]], i32* [[I3]], align 4
22423 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
22424 // CHECK11:       .omp.final.done:
22425 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
22426 // CHECK11:       omp.precond.end:
22427 // CHECK11-NEXT:    ret void
22428 //
22429 //
22430 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l90
22431 // CHECK11-SAME: (i32 [[CH:%.*]], i32 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
22432 // CHECK11-NEXT:  entry:
22433 // CHECK11-NEXT:    [[CH_ADDR:%.*]] = alloca i32, align 4
22434 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
22435 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
22436 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 4
22437 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 4
22438 // CHECK11-NEXT:    store i32 [[CH]], i32* [[CH_ADDR]], align 4
22439 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
22440 // CHECK11-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
22441 // CHECK11-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 4
22442 // CHECK11-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 4
22443 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..50 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
22444 // CHECK11-NEXT:    ret void
22445 //
22446 //
22447 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..50
22448 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
22449 // CHECK11-NEXT:  entry:
22450 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
22451 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
22452 // CHECK11-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 4
22453 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
22454 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
22455 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
22456 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
22457 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
22458 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
22459 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
22460 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
22461 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
22462 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
22463 // CHECK11-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
22464 // CHECK11-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
22465 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
22466 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
22467 // CHECK11-NEXT:    [[I4:%.*]] = alloca i32, align 4
22468 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
22469 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
22470 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
22471 // CHECK11-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 4
22472 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
22473 // CHECK11-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
22474 // CHECK11-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
22475 // CHECK11-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
22476 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
22477 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
22478 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
22479 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
22480 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
22481 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
22482 // CHECK11-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
22483 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
22484 // CHECK11-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
22485 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
22486 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
22487 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
22488 // CHECK11-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
22489 // CHECK11-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
22490 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
22491 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
22492 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
22493 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
22494 // CHECK11:       omp.precond.then:
22495 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
22496 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
22497 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
22498 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
22499 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
22500 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
22501 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
22502 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
22503 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
22504 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
22505 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
22506 // CHECK11-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
22507 // CHECK11:       cond.true:
22508 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
22509 // CHECK11-NEXT:    br label [[COND_END:%.*]]
22510 // CHECK11:       cond.false:
22511 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
22512 // CHECK11-NEXT:    br label [[COND_END]]
22513 // CHECK11:       cond.end:
22514 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
22515 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
22516 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
22517 // CHECK11-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
22518 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
22519 // CHECK11:       omp.inner.for.cond:
22520 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !99
22521 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !99
22522 // CHECK11-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
22523 // CHECK11-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
22524 // CHECK11:       omp.inner.for.body:
22525 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !99
22526 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !99
22527 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !99
22528 // CHECK11-NEXT:    store i32 [[TMP21]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !99
22529 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !99
22530 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32**, i32**, i32**, i32)* @.omp_outlined..51 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], i32** [[TMP2]], i32** [[TMP3]], i32** [[TMP4]], i32 [[TMP22]]), !llvm.access.group !99
22531 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
22532 // CHECK11:       omp.inner.for.inc:
22533 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !99
22534 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !99
22535 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
22536 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !99
22537 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP100:![0-9]+]]
22538 // CHECK11:       omp.inner.for.end:
22539 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
22540 // CHECK11:       omp.loop.exit:
22541 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
22542 // CHECK11-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
22543 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
22544 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
22545 // CHECK11-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
22546 // CHECK11-NEXT:    br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
22547 // CHECK11:       .omp.final.then:
22548 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
22549 // CHECK11-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP29]], 0
22550 // CHECK11-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
22551 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
22552 // CHECK11-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
22553 // CHECK11-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
22554 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
22555 // CHECK11:       .omp.final.done:
22556 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
22557 // CHECK11:       omp.precond.end:
22558 // CHECK11-NEXT:    ret void
22559 //
22560 //
22561 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..51
22562 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
22563 // CHECK11-NEXT:  entry:
22564 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
22565 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
22566 // CHECK11-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
22567 // CHECK11-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
22568 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
22569 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
22570 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
22571 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
22572 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
22573 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
22574 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
22575 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
22576 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
22577 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
22578 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
22579 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
22580 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
22581 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
22582 // CHECK11-NEXT:    [[I4:%.*]] = alloca i32, align 4
22583 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
22584 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
22585 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
22586 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
22587 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
22588 // CHECK11-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
22589 // CHECK11-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
22590 // CHECK11-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
22591 // CHECK11-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
22592 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
22593 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
22594 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
22595 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
22596 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
22597 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
22598 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
22599 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
22600 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
22601 // CHECK11-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
22602 // CHECK11-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
22603 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
22604 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
22605 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
22606 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
22607 // CHECK11:       omp.precond.then:
22608 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
22609 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
22610 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
22611 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
22612 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
22613 // CHECK11-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
22614 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
22615 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
22616 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
22617 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
22618 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
22619 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
22620 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
22621 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
22622 // CHECK11-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32 35, i32 [[TMP11]], i32 [[TMP12]], i32 1, i32 [[TMP10]])
22623 // CHECK11-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
22624 // CHECK11:       omp.dispatch.cond:
22625 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
22626 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4
22627 // CHECK11-NEXT:    [[TMP17:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP16]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
22628 // CHECK11-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP17]], 0
22629 // CHECK11-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
22630 // CHECK11:       omp.dispatch.body:
22631 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
22632 // CHECK11-NEXT:    store i32 [[TMP18]], i32* [[DOTOMP_IV]], align 4
22633 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
22634 // CHECK11:       omp.inner.for.cond:
22635 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !102
22636 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !102
22637 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
22638 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
22639 // CHECK11:       omp.inner.for.body:
22640 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !102
22641 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP21]], 1
22642 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
22643 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !102
22644 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32*, i32** [[TMP2]], align 4, !llvm.access.group !102
22645 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !102
22646 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP22]], i32 [[TMP23]]
22647 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !102
22648 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[TMP3]], align 4, !llvm.access.group !102
22649 // CHECK11-NEXT:    [[TMP26:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !102
22650 // CHECK11-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[TMP25]], i32 [[TMP26]]
22651 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4, !llvm.access.group !102
22652 // CHECK11-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP24]], [[TMP27]]
22653 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32*, i32** [[TMP1]], align 4, !llvm.access.group !102
22654 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !102
22655 // CHECK11-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP28]], i32 [[TMP29]]
22656 // CHECK11-NEXT:    store i32 [[ADD7]], i32* [[ARRAYIDX8]], align 4, !llvm.access.group !102
22657 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
22658 // CHECK11:       omp.body.continue:
22659 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
22660 // CHECK11:       omp.inner.for.inc:
22661 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !102
22662 // CHECK11-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP30]], 1
22663 // CHECK11-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !102
22664 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP103:![0-9]+]]
22665 // CHECK11:       omp.inner.for.end:
22666 // CHECK11-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
22667 // CHECK11:       omp.dispatch.inc:
22668 // CHECK11-NEXT:    br label [[OMP_DISPATCH_COND]]
22669 // CHECK11:       omp.dispatch.end:
22670 // CHECK11-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
22671 // CHECK11-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
22672 // CHECK11-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
22673 // CHECK11:       .omp.final.then:
22674 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
22675 // CHECK11-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
22676 // CHECK11-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
22677 // CHECK11-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
22678 // CHECK11-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
22679 // CHECK11-NEXT:    store i32 [[ADD13]], i32* [[I4]], align 4
22680 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
22681 // CHECK11:       .omp.final.done:
22682 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
22683 // CHECK11:       omp.precond.end:
22684 // CHECK11-NEXT:    ret void
22685 //
22686 //
22687 // CHECK11-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
22688 // CHECK11-SAME: () #[[ATTR4:[0-9]+]] {
22689 // CHECK11-NEXT:  entry:
22690 // CHECK11-NEXT:    call void @__tgt_register_requires(i64 1)
22691 // CHECK11-NEXT:    ret void
22692 //
22693 //
22694 // CHECK12-LABEL: define {{[^@]+}}@main
22695 // CHECK12-SAME: () #[[ATTR0:[0-9]+]] {
22696 // CHECK12-NEXT:  entry:
22697 // CHECK12-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
22698 // CHECK12-NEXT:    [[A:%.*]] = alloca double*, align 4
22699 // CHECK12-NEXT:    [[B:%.*]] = alloca double*, align 4
22700 // CHECK12-NEXT:    [[C:%.*]] = alloca double*, align 4
22701 // CHECK12-NEXT:    [[N:%.*]] = alloca i32, align 4
22702 // CHECK12-NEXT:    [[CH:%.*]] = alloca i32, align 4
22703 // CHECK12-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
22704 // CHECK12-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 4
22705 // CHECK12-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 4
22706 // CHECK12-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 4
22707 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
22708 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
22709 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
22710 // CHECK12-NEXT:    [[N_CASTED3:%.*]] = alloca i32, align 4
22711 // CHECK12-NEXT:    [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [4 x i8*], align 4
22712 // CHECK12-NEXT:    [[DOTOFFLOAD_PTRS5:%.*]] = alloca [4 x i8*], align 4
22713 // CHECK12-NEXT:    [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [4 x i8*], align 4
22714 // CHECK12-NEXT:    [[_TMP7:%.*]] = alloca i32, align 4
22715 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_8:%.*]] = alloca i32, align 4
22716 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4
22717 // CHECK12-NEXT:    [[CH_CASTED:%.*]] = alloca i32, align 4
22718 // CHECK12-NEXT:    [[N_CASTED16:%.*]] = alloca i32, align 4
22719 // CHECK12-NEXT:    [[DOTOFFLOAD_BASEPTRS17:%.*]] = alloca [5 x i8*], align 4
22720 // CHECK12-NEXT:    [[DOTOFFLOAD_PTRS18:%.*]] = alloca [5 x i8*], align 4
22721 // CHECK12-NEXT:    [[DOTOFFLOAD_MAPPERS19:%.*]] = alloca [5 x i8*], align 4
22722 // CHECK12-NEXT:    [[_TMP20:%.*]] = alloca i32, align 4
22723 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_21:%.*]] = alloca i32, align 4
22724 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_22:%.*]] = alloca i32, align 4
22725 // CHECK12-NEXT:    [[N_CASTED29:%.*]] = alloca i32, align 4
22726 // CHECK12-NEXT:    [[DOTOFFLOAD_BASEPTRS30:%.*]] = alloca [4 x i8*], align 4
22727 // CHECK12-NEXT:    [[DOTOFFLOAD_PTRS31:%.*]] = alloca [4 x i8*], align 4
22728 // CHECK12-NEXT:    [[DOTOFFLOAD_MAPPERS32:%.*]] = alloca [4 x i8*], align 4
22729 // CHECK12-NEXT:    [[_TMP33:%.*]] = alloca i32, align 4
22730 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_34:%.*]] = alloca i32, align 4
22731 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_35:%.*]] = alloca i32, align 4
22732 // CHECK12-NEXT:    [[CH_CASTED42:%.*]] = alloca i32, align 4
22733 // CHECK12-NEXT:    [[N_CASTED43:%.*]] = alloca i32, align 4
22734 // CHECK12-NEXT:    [[DOTOFFLOAD_BASEPTRS44:%.*]] = alloca [5 x i8*], align 4
22735 // CHECK12-NEXT:    [[DOTOFFLOAD_PTRS45:%.*]] = alloca [5 x i8*], align 4
22736 // CHECK12-NEXT:    [[DOTOFFLOAD_MAPPERS46:%.*]] = alloca [5 x i8*], align 4
22737 // CHECK12-NEXT:    [[_TMP47:%.*]] = alloca i32, align 4
22738 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_48:%.*]] = alloca i32, align 4
22739 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_49:%.*]] = alloca i32, align 4
22740 // CHECK12-NEXT:    [[N_CASTED56:%.*]] = alloca i32, align 4
22741 // CHECK12-NEXT:    [[DOTOFFLOAD_BASEPTRS57:%.*]] = alloca [4 x i8*], align 4
22742 // CHECK12-NEXT:    [[DOTOFFLOAD_PTRS58:%.*]] = alloca [4 x i8*], align 4
22743 // CHECK12-NEXT:    [[DOTOFFLOAD_MAPPERS59:%.*]] = alloca [4 x i8*], align 4
22744 // CHECK12-NEXT:    [[_TMP60:%.*]] = alloca i32, align 4
22745 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_61:%.*]] = alloca i32, align 4
22746 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_62:%.*]] = alloca i32, align 4
22747 // CHECK12-NEXT:    [[CH_CASTED69:%.*]] = alloca i32, align 4
22748 // CHECK12-NEXT:    [[N_CASTED70:%.*]] = alloca i32, align 4
22749 // CHECK12-NEXT:    [[DOTOFFLOAD_BASEPTRS71:%.*]] = alloca [5 x i8*], align 4
22750 // CHECK12-NEXT:    [[DOTOFFLOAD_PTRS72:%.*]] = alloca [5 x i8*], align 4
22751 // CHECK12-NEXT:    [[DOTOFFLOAD_MAPPERS73:%.*]] = alloca [5 x i8*], align 4
22752 // CHECK12-NEXT:    [[_TMP74:%.*]] = alloca i32, align 4
22753 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_75:%.*]] = alloca i32, align 4
22754 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_76:%.*]] = alloca i32, align 4
22755 // CHECK12-NEXT:    store i32 0, i32* [[RETVAL]], align 4
22756 // CHECK12-NEXT:    store i32 10000, i32* [[N]], align 4
22757 // CHECK12-NEXT:    store i32 100, i32* [[CH]], align 4
22758 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
22759 // CHECK12-NEXT:    store i32 [[TMP0]], i32* [[N_CASTED]], align 4
22760 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_CASTED]], align 4
22761 // CHECK12-NEXT:    [[TMP2:%.*]] = load double*, double** [[A]], align 4
22762 // CHECK12-NEXT:    [[TMP3:%.*]] = load double*, double** [[B]], align 4
22763 // CHECK12-NEXT:    [[TMP4:%.*]] = load double*, double** [[C]], align 4
22764 // CHECK12-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
22765 // CHECK12-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32*
22766 // CHECK12-NEXT:    store i32 [[TMP1]], i32* [[TMP6]], align 4
22767 // CHECK12-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
22768 // CHECK12-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32*
22769 // CHECK12-NEXT:    store i32 [[TMP1]], i32* [[TMP8]], align 4
22770 // CHECK12-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
22771 // CHECK12-NEXT:    store i8* null, i8** [[TMP9]], align 4
22772 // CHECK12-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
22773 // CHECK12-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to double**
22774 // CHECK12-NEXT:    store double* [[TMP2]], double** [[TMP11]], align 4
22775 // CHECK12-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
22776 // CHECK12-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double**
22777 // CHECK12-NEXT:    store double* [[TMP2]], double** [[TMP13]], align 4
22778 // CHECK12-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
22779 // CHECK12-NEXT:    store i8* null, i8** [[TMP14]], align 4
22780 // CHECK12-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
22781 // CHECK12-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to double**
22782 // CHECK12-NEXT:    store double* [[TMP3]], double** [[TMP16]], align 4
22783 // CHECK12-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
22784 // CHECK12-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to double**
22785 // CHECK12-NEXT:    store double* [[TMP3]], double** [[TMP18]], align 4
22786 // CHECK12-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
22787 // CHECK12-NEXT:    store i8* null, i8** [[TMP19]], align 4
22788 // CHECK12-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
22789 // CHECK12-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to double**
22790 // CHECK12-NEXT:    store double* [[TMP4]], double** [[TMP21]], align 4
22791 // CHECK12-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
22792 // CHECK12-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to double**
22793 // CHECK12-NEXT:    store double* [[TMP4]], double** [[TMP23]], align 4
22794 // CHECK12-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
22795 // CHECK12-NEXT:    store i8* null, i8** [[TMP24]], align 4
22796 // CHECK12-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
22797 // CHECK12-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
22798 // CHECK12-NEXT:    [[TMP27:%.*]] = load i32, i32* [[N]], align 4
22799 // CHECK12-NEXT:    store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4
22800 // CHECK12-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
22801 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0
22802 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
22803 // CHECK12-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
22804 // CHECK12-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
22805 // CHECK12-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
22806 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP29]], 1
22807 // CHECK12-NEXT:    [[TMP30:%.*]] = zext i32 [[ADD]] to i64
22808 // CHECK12-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]])
22809 // CHECK12-NEXT:    [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l368.region_id, i32 4, i8** [[TMP25]], i8** [[TMP26]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
22810 // CHECK12-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
22811 // CHECK12-NEXT:    br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
22812 // CHECK12:       omp_offload.failed:
22813 // CHECK12-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l368(i32 [[TMP1]], double* [[TMP2]], double* [[TMP3]], double* [[TMP4]]) #[[ATTR2:[0-9]+]]
22814 // CHECK12-NEXT:    br label [[OMP_OFFLOAD_CONT]]
22815 // CHECK12:       omp_offload.cont:
22816 // CHECK12-NEXT:    [[TMP33:%.*]] = load i32, i32* [[N]], align 4
22817 // CHECK12-NEXT:    store i32 [[TMP33]], i32* [[N_CASTED3]], align 4
22818 // CHECK12-NEXT:    [[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4
22819 // CHECK12-NEXT:    [[TMP35:%.*]] = load double*, double** [[A]], align 4
22820 // CHECK12-NEXT:    [[TMP36:%.*]] = load double*, double** [[B]], align 4
22821 // CHECK12-NEXT:    [[TMP37:%.*]] = load double*, double** [[C]], align 4
22822 // CHECK12-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
22823 // CHECK12-NEXT:    [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32*
22824 // CHECK12-NEXT:    store i32 [[TMP34]], i32* [[TMP39]], align 4
22825 // CHECK12-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
22826 // CHECK12-NEXT:    [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32*
22827 // CHECK12-NEXT:    store i32 [[TMP34]], i32* [[TMP41]], align 4
22828 // CHECK12-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0
22829 // CHECK12-NEXT:    store i8* null, i8** [[TMP42]], align 4
22830 // CHECK12-NEXT:    [[TMP43:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1
22831 // CHECK12-NEXT:    [[TMP44:%.*]] = bitcast i8** [[TMP43]] to double**
22832 // CHECK12-NEXT:    store double* [[TMP35]], double** [[TMP44]], align 4
22833 // CHECK12-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1
22834 // CHECK12-NEXT:    [[TMP46:%.*]] = bitcast i8** [[TMP45]] to double**
22835 // CHECK12-NEXT:    store double* [[TMP35]], double** [[TMP46]], align 4
22836 // CHECK12-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1
22837 // CHECK12-NEXT:    store i8* null, i8** [[TMP47]], align 4
22838 // CHECK12-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2
22839 // CHECK12-NEXT:    [[TMP49:%.*]] = bitcast i8** [[TMP48]] to double**
22840 // CHECK12-NEXT:    store double* [[TMP36]], double** [[TMP49]], align 4
22841 // CHECK12-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2
22842 // CHECK12-NEXT:    [[TMP51:%.*]] = bitcast i8** [[TMP50]] to double**
22843 // CHECK12-NEXT:    store double* [[TMP36]], double** [[TMP51]], align 4
22844 // CHECK12-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2
22845 // CHECK12-NEXT:    store i8* null, i8** [[TMP52]], align 4
22846 // CHECK12-NEXT:    [[TMP53:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 3
22847 // CHECK12-NEXT:    [[TMP54:%.*]] = bitcast i8** [[TMP53]] to double**
22848 // CHECK12-NEXT:    store double* [[TMP37]], double** [[TMP54]], align 4
22849 // CHECK12-NEXT:    [[TMP55:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 3
22850 // CHECK12-NEXT:    [[TMP56:%.*]] = bitcast i8** [[TMP55]] to double**
22851 // CHECK12-NEXT:    store double* [[TMP37]], double** [[TMP56]], align 4
22852 // CHECK12-NEXT:    [[TMP57:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 3
22853 // CHECK12-NEXT:    store i8* null, i8** [[TMP57]], align 4
22854 // CHECK12-NEXT:    [[TMP58:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
22855 // CHECK12-NEXT:    [[TMP59:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
22856 // CHECK12-NEXT:    [[TMP60:%.*]] = load i32, i32* [[N]], align 4
22857 // CHECK12-NEXT:    store i32 [[TMP60]], i32* [[DOTCAPTURE_EXPR_8]], align 4
22858 // CHECK12-NEXT:    [[TMP61:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_8]], align 4
22859 // CHECK12-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP61]], 0
22860 // CHECK12-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
22861 // CHECK12-NEXT:    [[SUB12:%.*]] = sub nsw i32 [[DIV11]], 1
22862 // CHECK12-NEXT:    store i32 [[SUB12]], i32* [[DOTCAPTURE_EXPR_9]], align 4
22863 // CHECK12-NEXT:    [[TMP62:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4
22864 // CHECK12-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP62]], 1
22865 // CHECK12-NEXT:    [[TMP63:%.*]] = zext i32 [[ADD13]] to i64
22866 // CHECK12-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP63]])
22867 // CHECK12-NEXT:    [[TMP64:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l407.region_id, i32 4, i8** [[TMP58]], i8** [[TMP59]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
22868 // CHECK12-NEXT:    [[TMP65:%.*]] = icmp ne i32 [[TMP64]], 0
22869 // CHECK12-NEXT:    br i1 [[TMP65]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]]
22870 // CHECK12:       omp_offload.failed14:
22871 // CHECK12-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l407(i32 [[TMP34]], double* [[TMP35]], double* [[TMP36]], double* [[TMP37]]) #[[ATTR2]]
22872 // CHECK12-NEXT:    br label [[OMP_OFFLOAD_CONT15]]
22873 // CHECK12:       omp_offload.cont15:
22874 // CHECK12-NEXT:    [[TMP66:%.*]] = load i32, i32* [[CH]], align 4
22875 // CHECK12-NEXT:    store i32 [[TMP66]], i32* [[CH_CASTED]], align 4
22876 // CHECK12-NEXT:    [[TMP67:%.*]] = load i32, i32* [[CH_CASTED]], align 4
22877 // CHECK12-NEXT:    [[TMP68:%.*]] = load i32, i32* [[N]], align 4
22878 // CHECK12-NEXT:    store i32 [[TMP68]], i32* [[N_CASTED16]], align 4
22879 // CHECK12-NEXT:    [[TMP69:%.*]] = load i32, i32* [[N_CASTED16]], align 4
22880 // CHECK12-NEXT:    [[TMP70:%.*]] = load double*, double** [[A]], align 4
22881 // CHECK12-NEXT:    [[TMP71:%.*]] = load double*, double** [[B]], align 4
22882 // CHECK12-NEXT:    [[TMP72:%.*]] = load double*, double** [[C]], align 4
22883 // CHECK12-NEXT:    [[TMP73:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
22884 // CHECK12-NEXT:    [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32*
22885 // CHECK12-NEXT:    store i32 [[TMP67]], i32* [[TMP74]], align 4
22886 // CHECK12-NEXT:    [[TMP75:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
22887 // CHECK12-NEXT:    [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i32*
22888 // CHECK12-NEXT:    store i32 [[TMP67]], i32* [[TMP76]], align 4
22889 // CHECK12-NEXT:    [[TMP77:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 0
22890 // CHECK12-NEXT:    store i8* null, i8** [[TMP77]], align 4
22891 // CHECK12-NEXT:    [[TMP78:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 1
22892 // CHECK12-NEXT:    [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32*
22893 // CHECK12-NEXT:    store i32 [[TMP69]], i32* [[TMP79]], align 4
22894 // CHECK12-NEXT:    [[TMP80:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 1
22895 // CHECK12-NEXT:    [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i32*
22896 // CHECK12-NEXT:    store i32 [[TMP69]], i32* [[TMP81]], align 4
22897 // CHECK12-NEXT:    [[TMP82:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 1
22898 // CHECK12-NEXT:    store i8* null, i8** [[TMP82]], align 4
22899 // CHECK12-NEXT:    [[TMP83:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 2
22900 // CHECK12-NEXT:    [[TMP84:%.*]] = bitcast i8** [[TMP83]] to double**
22901 // CHECK12-NEXT:    store double* [[TMP70]], double** [[TMP84]], align 4
22902 // CHECK12-NEXT:    [[TMP85:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 2
22903 // CHECK12-NEXT:    [[TMP86:%.*]] = bitcast i8** [[TMP85]] to double**
22904 // CHECK12-NEXT:    store double* [[TMP70]], double** [[TMP86]], align 4
22905 // CHECK12-NEXT:    [[TMP87:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 2
22906 // CHECK12-NEXT:    store i8* null, i8** [[TMP87]], align 4
22907 // CHECK12-NEXT:    [[TMP88:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 3
22908 // CHECK12-NEXT:    [[TMP89:%.*]] = bitcast i8** [[TMP88]] to double**
22909 // CHECK12-NEXT:    store double* [[TMP71]], double** [[TMP89]], align 4
22910 // CHECK12-NEXT:    [[TMP90:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 3
22911 // CHECK12-NEXT:    [[TMP91:%.*]] = bitcast i8** [[TMP90]] to double**
22912 // CHECK12-NEXT:    store double* [[TMP71]], double** [[TMP91]], align 4
22913 // CHECK12-NEXT:    [[TMP92:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 3
22914 // CHECK12-NEXT:    store i8* null, i8** [[TMP92]], align 4
22915 // CHECK12-NEXT:    [[TMP93:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 4
22916 // CHECK12-NEXT:    [[TMP94:%.*]] = bitcast i8** [[TMP93]] to double**
22917 // CHECK12-NEXT:    store double* [[TMP72]], double** [[TMP94]], align 4
22918 // CHECK12-NEXT:    [[TMP95:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 4
22919 // CHECK12-NEXT:    [[TMP96:%.*]] = bitcast i8** [[TMP95]] to double**
22920 // CHECK12-NEXT:    store double* [[TMP72]], double** [[TMP96]], align 4
22921 // CHECK12-NEXT:    [[TMP97:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 4
22922 // CHECK12-NEXT:    store i8* null, i8** [[TMP97]], align 4
22923 // CHECK12-NEXT:    [[TMP98:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
22924 // CHECK12-NEXT:    [[TMP99:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
22925 // CHECK12-NEXT:    [[TMP100:%.*]] = load i32, i32* [[N]], align 4
22926 // CHECK12-NEXT:    store i32 [[TMP100]], i32* [[DOTCAPTURE_EXPR_21]], align 4
22927 // CHECK12-NEXT:    [[TMP101:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_21]], align 4
22928 // CHECK12-NEXT:    [[SUB23:%.*]] = sub nsw i32 [[TMP101]], 0
22929 // CHECK12-NEXT:    [[DIV24:%.*]] = sdiv i32 [[SUB23]], 1
22930 // CHECK12-NEXT:    [[SUB25:%.*]] = sub nsw i32 [[DIV24]], 1
22931 // CHECK12-NEXT:    store i32 [[SUB25]], i32* [[DOTCAPTURE_EXPR_22]], align 4
22932 // CHECK12-NEXT:    [[TMP102:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_22]], align 4
22933 // CHECK12-NEXT:    [[ADD26:%.*]] = add nsw i32 [[TMP102]], 1
22934 // CHECK12-NEXT:    [[TMP103:%.*]] = zext i32 [[ADD26]] to i64
22935 // CHECK12-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP103]])
22936 // CHECK12-NEXT:    [[TMP104:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l446.region_id, i32 5, i8** [[TMP98]], i8** [[TMP99]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
22937 // CHECK12-NEXT:    [[TMP105:%.*]] = icmp ne i32 [[TMP104]], 0
22938 // CHECK12-NEXT:    br i1 [[TMP105]], label [[OMP_OFFLOAD_FAILED27:%.*]], label [[OMP_OFFLOAD_CONT28:%.*]]
22939 // CHECK12:       omp_offload.failed27:
22940 // CHECK12-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l446(i32 [[TMP67]], i32 [[TMP69]], double* [[TMP70]], double* [[TMP71]], double* [[TMP72]]) #[[ATTR2]]
22941 // CHECK12-NEXT:    br label [[OMP_OFFLOAD_CONT28]]
22942 // CHECK12:       omp_offload.cont28:
22943 // CHECK12-NEXT:    [[TMP106:%.*]] = load i32, i32* [[N]], align 4
22944 // CHECK12-NEXT:    store i32 [[TMP106]], i32* [[N_CASTED29]], align 4
22945 // CHECK12-NEXT:    [[TMP107:%.*]] = load i32, i32* [[N_CASTED29]], align 4
22946 // CHECK12-NEXT:    [[TMP108:%.*]] = load double*, double** [[A]], align 4
22947 // CHECK12-NEXT:    [[TMP109:%.*]] = load double*, double** [[B]], align 4
22948 // CHECK12-NEXT:    [[TMP110:%.*]] = load double*, double** [[C]], align 4
22949 // CHECK12-NEXT:    [[TMP111:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 0
22950 // CHECK12-NEXT:    [[TMP112:%.*]] = bitcast i8** [[TMP111]] to i32*
22951 // CHECK12-NEXT:    store i32 [[TMP107]], i32* [[TMP112]], align 4
22952 // CHECK12-NEXT:    [[TMP113:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 0
22953 // CHECK12-NEXT:    [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i32*
22954 // CHECK12-NEXT:    store i32 [[TMP107]], i32* [[TMP114]], align 4
22955 // CHECK12-NEXT:    [[TMP115:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 0
22956 // CHECK12-NEXT:    store i8* null, i8** [[TMP115]], align 4
22957 // CHECK12-NEXT:    [[TMP116:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 1
22958 // CHECK12-NEXT:    [[TMP117:%.*]] = bitcast i8** [[TMP116]] to double**
22959 // CHECK12-NEXT:    store double* [[TMP108]], double** [[TMP117]], align 4
22960 // CHECK12-NEXT:    [[TMP118:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 1
22961 // CHECK12-NEXT:    [[TMP119:%.*]] = bitcast i8** [[TMP118]] to double**
22962 // CHECK12-NEXT:    store double* [[TMP108]], double** [[TMP119]], align 4
22963 // CHECK12-NEXT:    [[TMP120:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 1
22964 // CHECK12-NEXT:    store i8* null, i8** [[TMP120]], align 4
22965 // CHECK12-NEXT:    [[TMP121:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 2
22966 // CHECK12-NEXT:    [[TMP122:%.*]] = bitcast i8** [[TMP121]] to double**
22967 // CHECK12-NEXT:    store double* [[TMP109]], double** [[TMP122]], align 4
22968 // CHECK12-NEXT:    [[TMP123:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 2
22969 // CHECK12-NEXT:    [[TMP124:%.*]] = bitcast i8** [[TMP123]] to double**
22970 // CHECK12-NEXT:    store double* [[TMP109]], double** [[TMP124]], align 4
22971 // CHECK12-NEXT:    [[TMP125:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 2
22972 // CHECK12-NEXT:    store i8* null, i8** [[TMP125]], align 4
22973 // CHECK12-NEXT:    [[TMP126:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 3
22974 // CHECK12-NEXT:    [[TMP127:%.*]] = bitcast i8** [[TMP126]] to double**
22975 // CHECK12-NEXT:    store double* [[TMP110]], double** [[TMP127]], align 4
22976 // CHECK12-NEXT:    [[TMP128:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 3
22977 // CHECK12-NEXT:    [[TMP129:%.*]] = bitcast i8** [[TMP128]] to double**
22978 // CHECK12-NEXT:    store double* [[TMP110]], double** [[TMP129]], align 4
22979 // CHECK12-NEXT:    [[TMP130:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 3
22980 // CHECK12-NEXT:    store i8* null, i8** [[TMP130]], align 4
22981 // CHECK12-NEXT:    [[TMP131:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 0
22982 // CHECK12-NEXT:    [[TMP132:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 0
22983 // CHECK12-NEXT:    [[TMP133:%.*]] = load i32, i32* [[N]], align 4
22984 // CHECK12-NEXT:    store i32 [[TMP133]], i32* [[DOTCAPTURE_EXPR_34]], align 4
22985 // CHECK12-NEXT:    [[TMP134:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_34]], align 4
22986 // CHECK12-NEXT:    [[SUB36:%.*]] = sub nsw i32 [[TMP134]], 0
22987 // CHECK12-NEXT:    [[DIV37:%.*]] = sdiv i32 [[SUB36]], 1
22988 // CHECK12-NEXT:    [[SUB38:%.*]] = sub nsw i32 [[DIV37]], 1
22989 // CHECK12-NEXT:    store i32 [[SUB38]], i32* [[DOTCAPTURE_EXPR_35]], align 4
22990 // CHECK12-NEXT:    [[TMP135:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_35]], align 4
22991 // CHECK12-NEXT:    [[ADD39:%.*]] = add nsw i32 [[TMP135]], 1
22992 // CHECK12-NEXT:    [[TMP136:%.*]] = zext i32 [[ADD39]] to i64
22993 // CHECK12-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP136]])
22994 // CHECK12-NEXT:    [[TMP137:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l477.region_id, i32 4, i8** [[TMP131]], i8** [[TMP132]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
22995 // CHECK12-NEXT:    [[TMP138:%.*]] = icmp ne i32 [[TMP137]], 0
22996 // CHECK12-NEXT:    br i1 [[TMP138]], label [[OMP_OFFLOAD_FAILED40:%.*]], label [[OMP_OFFLOAD_CONT41:%.*]]
22997 // CHECK12:       omp_offload.failed40:
22998 // CHECK12-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l477(i32 [[TMP107]], double* [[TMP108]], double* [[TMP109]], double* [[TMP110]]) #[[ATTR2]]
22999 // CHECK12-NEXT:    br label [[OMP_OFFLOAD_CONT41]]
23000 // CHECK12:       omp_offload.cont41:
23001 // CHECK12-NEXT:    [[TMP139:%.*]] = load i32, i32* [[CH]], align 4
23002 // CHECK12-NEXT:    store i32 [[TMP139]], i32* [[CH_CASTED42]], align 4
23003 // CHECK12-NEXT:    [[TMP140:%.*]] = load i32, i32* [[CH_CASTED42]], align 4
23004 // CHECK12-NEXT:    [[TMP141:%.*]] = load i32, i32* [[N]], align 4
23005 // CHECK12-NEXT:    store i32 [[TMP141]], i32* [[N_CASTED43]], align 4
23006 // CHECK12-NEXT:    [[TMP142:%.*]] = load i32, i32* [[N_CASTED43]], align 4
23007 // CHECK12-NEXT:    [[TMP143:%.*]] = load double*, double** [[A]], align 4
23008 // CHECK12-NEXT:    [[TMP144:%.*]] = load double*, double** [[B]], align 4
23009 // CHECK12-NEXT:    [[TMP145:%.*]] = load double*, double** [[C]], align 4
23010 // CHECK12-NEXT:    [[TMP146:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 0
23011 // CHECK12-NEXT:    [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i32*
23012 // CHECK12-NEXT:    store i32 [[TMP140]], i32* [[TMP147]], align 4
23013 // CHECK12-NEXT:    [[TMP148:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 0
23014 // CHECK12-NEXT:    [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i32*
23015 // CHECK12-NEXT:    store i32 [[TMP140]], i32* [[TMP149]], align 4
23016 // CHECK12-NEXT:    [[TMP150:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 0
23017 // CHECK12-NEXT:    store i8* null, i8** [[TMP150]], align 4
23018 // CHECK12-NEXT:    [[TMP151:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 1
23019 // CHECK12-NEXT:    [[TMP152:%.*]] = bitcast i8** [[TMP151]] to i32*
23020 // CHECK12-NEXT:    store i32 [[TMP142]], i32* [[TMP152]], align 4
23021 // CHECK12-NEXT:    [[TMP153:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 1
23022 // CHECK12-NEXT:    [[TMP154:%.*]] = bitcast i8** [[TMP153]] to i32*
23023 // CHECK12-NEXT:    store i32 [[TMP142]], i32* [[TMP154]], align 4
23024 // CHECK12-NEXT:    [[TMP155:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 1
23025 // CHECK12-NEXT:    store i8* null, i8** [[TMP155]], align 4
23026 // CHECK12-NEXT:    [[TMP156:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 2
23027 // CHECK12-NEXT:    [[TMP157:%.*]] = bitcast i8** [[TMP156]] to double**
23028 // CHECK12-NEXT:    store double* [[TMP143]], double** [[TMP157]], align 4
23029 // CHECK12-NEXT:    [[TMP158:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 2
23030 // CHECK12-NEXT:    [[TMP159:%.*]] = bitcast i8** [[TMP158]] to double**
23031 // CHECK12-NEXT:    store double* [[TMP143]], double** [[TMP159]], align 4
23032 // CHECK12-NEXT:    [[TMP160:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 2
23033 // CHECK12-NEXT:    store i8* null, i8** [[TMP160]], align 4
23034 // CHECK12-NEXT:    [[TMP161:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 3
23035 // CHECK12-NEXT:    [[TMP162:%.*]] = bitcast i8** [[TMP161]] to double**
23036 // CHECK12-NEXT:    store double* [[TMP144]], double** [[TMP162]], align 4
23037 // CHECK12-NEXT:    [[TMP163:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 3
23038 // CHECK12-NEXT:    [[TMP164:%.*]] = bitcast i8** [[TMP163]] to double**
23039 // CHECK12-NEXT:    store double* [[TMP144]], double** [[TMP164]], align 4
23040 // CHECK12-NEXT:    [[TMP165:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 3
23041 // CHECK12-NEXT:    store i8* null, i8** [[TMP165]], align 4
23042 // CHECK12-NEXT:    [[TMP166:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 4
23043 // CHECK12-NEXT:    [[TMP167:%.*]] = bitcast i8** [[TMP166]] to double**
23044 // CHECK12-NEXT:    store double* [[TMP145]], double** [[TMP167]], align 4
23045 // CHECK12-NEXT:    [[TMP168:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 4
23046 // CHECK12-NEXT:    [[TMP169:%.*]] = bitcast i8** [[TMP168]] to double**
23047 // CHECK12-NEXT:    store double* [[TMP145]], double** [[TMP169]], align 4
23048 // CHECK12-NEXT:    [[TMP170:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 4
23049 // CHECK12-NEXT:    store i8* null, i8** [[TMP170]], align 4
23050 // CHECK12-NEXT:    [[TMP171:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 0
23051 // CHECK12-NEXT:    [[TMP172:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 0
23052 // CHECK12-NEXT:    [[TMP173:%.*]] = load i32, i32* [[N]], align 4
23053 // CHECK12-NEXT:    store i32 [[TMP173]], i32* [[DOTCAPTURE_EXPR_48]], align 4
23054 // CHECK12-NEXT:    [[TMP174:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_48]], align 4
23055 // CHECK12-NEXT:    [[SUB50:%.*]] = sub nsw i32 [[TMP174]], 0
23056 // CHECK12-NEXT:    [[DIV51:%.*]] = sdiv i32 [[SUB50]], 1
23057 // CHECK12-NEXT:    [[SUB52:%.*]] = sub nsw i32 [[DIV51]], 1
23058 // CHECK12-NEXT:    store i32 [[SUB52]], i32* [[DOTCAPTURE_EXPR_49]], align 4
23059 // CHECK12-NEXT:    [[TMP175:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_49]], align 4
23060 // CHECK12-NEXT:    [[ADD53:%.*]] = add nsw i32 [[TMP175]], 1
23061 // CHECK12-NEXT:    [[TMP176:%.*]] = zext i32 [[ADD53]] to i64
23062 // CHECK12-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP176]])
23063 // CHECK12-NEXT:    [[TMP177:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l505.region_id, i32 5, i8** [[TMP171]], i8** [[TMP172]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
23064 // CHECK12-NEXT:    [[TMP178:%.*]] = icmp ne i32 [[TMP177]], 0
23065 // CHECK12-NEXT:    br i1 [[TMP178]], label [[OMP_OFFLOAD_FAILED54:%.*]], label [[OMP_OFFLOAD_CONT55:%.*]]
23066 // CHECK12:       omp_offload.failed54:
23067 // CHECK12-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l505(i32 [[TMP140]], i32 [[TMP142]], double* [[TMP143]], double* [[TMP144]], double* [[TMP145]]) #[[ATTR2]]
23068 // CHECK12-NEXT:    br label [[OMP_OFFLOAD_CONT55]]
23069 // CHECK12:       omp_offload.cont55:
23070 // CHECK12-NEXT:    [[TMP179:%.*]] = load i32, i32* [[N]], align 4
23071 // CHECK12-NEXT:    store i32 [[TMP179]], i32* [[N_CASTED56]], align 4
23072 // CHECK12-NEXT:    [[TMP180:%.*]] = load i32, i32* [[N_CASTED56]], align 4
23073 // CHECK12-NEXT:    [[TMP181:%.*]] = load double*, double** [[A]], align 4
23074 // CHECK12-NEXT:    [[TMP182:%.*]] = load double*, double** [[B]], align 4
23075 // CHECK12-NEXT:    [[TMP183:%.*]] = load double*, double** [[C]], align 4
23076 // CHECK12-NEXT:    [[TMP184:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 0
23077 // CHECK12-NEXT:    [[TMP185:%.*]] = bitcast i8** [[TMP184]] to i32*
23078 // CHECK12-NEXT:    store i32 [[TMP180]], i32* [[TMP185]], align 4
23079 // CHECK12-NEXT:    [[TMP186:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS58]], i32 0, i32 0
23080 // CHECK12-NEXT:    [[TMP187:%.*]] = bitcast i8** [[TMP186]] to i32*
23081 // CHECK12-NEXT:    store i32 [[TMP180]], i32* [[TMP187]], align 4
23082 // CHECK12-NEXT:    [[TMP188:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS59]], i32 0, i32 0
23083 // CHECK12-NEXT:    store i8* null, i8** [[TMP188]], align 4
23084 // CHECK12-NEXT:    [[TMP189:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 1
23085 // CHECK12-NEXT:    [[TMP190:%.*]] = bitcast i8** [[TMP189]] to double**
23086 // CHECK12-NEXT:    store double* [[TMP181]], double** [[TMP190]], align 4
23087 // CHECK12-NEXT:    [[TMP191:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS58]], i32 0, i32 1
23088 // CHECK12-NEXT:    [[TMP192:%.*]] = bitcast i8** [[TMP191]] to double**
23089 // CHECK12-NEXT:    store double* [[TMP181]], double** [[TMP192]], align 4
23090 // CHECK12-NEXT:    [[TMP193:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS59]], i32 0, i32 1
23091 // CHECK12-NEXT:    store i8* null, i8** [[TMP193]], align 4
23092 // CHECK12-NEXT:    [[TMP194:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 2
23093 // CHECK12-NEXT:    [[TMP195:%.*]] = bitcast i8** [[TMP194]] to double**
23094 // CHECK12-NEXT:    store double* [[TMP182]], double** [[TMP195]], align 4
23095 // CHECK12-NEXT:    [[TMP196:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS58]], i32 0, i32 2
23096 // CHECK12-NEXT:    [[TMP197:%.*]] = bitcast i8** [[TMP196]] to double**
23097 // CHECK12-NEXT:    store double* [[TMP182]], double** [[TMP197]], align 4
23098 // CHECK12-NEXT:    [[TMP198:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS59]], i32 0, i32 2
23099 // CHECK12-NEXT:    store i8* null, i8** [[TMP198]], align 4
23100 // CHECK12-NEXT:    [[TMP199:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 3
23101 // CHECK12-NEXT:    [[TMP200:%.*]] = bitcast i8** [[TMP199]] to double**
23102 // CHECK12-NEXT:    store double* [[TMP183]], double** [[TMP200]], align 4
23103 // CHECK12-NEXT:    [[TMP201:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS58]], i32 0, i32 3
23104 // CHECK12-NEXT:    [[TMP202:%.*]] = bitcast i8** [[TMP201]] to double**
23105 // CHECK12-NEXT:    store double* [[TMP183]], double** [[TMP202]], align 4
23106 // CHECK12-NEXT:    [[TMP203:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS59]], i32 0, i32 3
23107 // CHECK12-NEXT:    store i8* null, i8** [[TMP203]], align 4
23108 // CHECK12-NEXT:    [[TMP204:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 0
23109 // CHECK12-NEXT:    [[TMP205:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS58]], i32 0, i32 0
23110 // CHECK12-NEXT:    [[TMP206:%.*]] = load i32, i32* [[N]], align 4
23111 // CHECK12-NEXT:    store i32 [[TMP206]], i32* [[DOTCAPTURE_EXPR_61]], align 4
23112 // CHECK12-NEXT:    [[TMP207:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_61]], align 4
23113 // CHECK12-NEXT:    [[SUB63:%.*]] = sub nsw i32 [[TMP207]], 0
23114 // CHECK12-NEXT:    [[DIV64:%.*]] = sdiv i32 [[SUB63]], 1
23115 // CHECK12-NEXT:    [[SUB65:%.*]] = sub nsw i32 [[DIV64]], 1
23116 // CHECK12-NEXT:    store i32 [[SUB65]], i32* [[DOTCAPTURE_EXPR_62]], align 4
23117 // CHECK12-NEXT:    [[TMP208:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_62]], align 4
23118 // CHECK12-NEXT:    [[ADD66:%.*]] = add nsw i32 [[TMP208]], 1
23119 // CHECK12-NEXT:    [[TMP209:%.*]] = zext i32 [[ADD66]] to i64
23120 // CHECK12-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP209]])
23121 // CHECK12-NEXT:    [[TMP210:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l535.region_id, i32 4, i8** [[TMP204]], i8** [[TMP205]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
23122 // CHECK12-NEXT:    [[TMP211:%.*]] = icmp ne i32 [[TMP210]], 0
23123 // CHECK12-NEXT:    br i1 [[TMP211]], label [[OMP_OFFLOAD_FAILED67:%.*]], label [[OMP_OFFLOAD_CONT68:%.*]]
23124 // CHECK12:       omp_offload.failed67:
23125 // CHECK12-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l535(i32 [[TMP180]], double* [[TMP181]], double* [[TMP182]], double* [[TMP183]]) #[[ATTR2]]
23126 // CHECK12-NEXT:    br label [[OMP_OFFLOAD_CONT68]]
23127 // CHECK12:       omp_offload.cont68:
23128 // CHECK12-NEXT:    [[TMP212:%.*]] = load i32, i32* [[CH]], align 4
23129 // CHECK12-NEXT:    store i32 [[TMP212]], i32* [[CH_CASTED69]], align 4
23130 // CHECK12-NEXT:    [[TMP213:%.*]] = load i32, i32* [[CH_CASTED69]], align 4
23131 // CHECK12-NEXT:    [[TMP214:%.*]] = load i32, i32* [[N]], align 4
23132 // CHECK12-NEXT:    store i32 [[TMP214]], i32* [[N_CASTED70]], align 4
23133 // CHECK12-NEXT:    [[TMP215:%.*]] = load i32, i32* [[N_CASTED70]], align 4
23134 // CHECK12-NEXT:    [[TMP216:%.*]] = load double*, double** [[A]], align 4
23135 // CHECK12-NEXT:    [[TMP217:%.*]] = load double*, double** [[B]], align 4
23136 // CHECK12-NEXT:    [[TMP218:%.*]] = load double*, double** [[C]], align 4
23137 // CHECK12-NEXT:    [[TMP219:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 0
23138 // CHECK12-NEXT:    [[TMP220:%.*]] = bitcast i8** [[TMP219]] to i32*
23139 // CHECK12-NEXT:    store i32 [[TMP213]], i32* [[TMP220]], align 4
23140 // CHECK12-NEXT:    [[TMP221:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 0
23141 // CHECK12-NEXT:    [[TMP222:%.*]] = bitcast i8** [[TMP221]] to i32*
23142 // CHECK12-NEXT:    store i32 [[TMP213]], i32* [[TMP222]], align 4
23143 // CHECK12-NEXT:    [[TMP223:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS73]], i32 0, i32 0
23144 // CHECK12-NEXT:    store i8* null, i8** [[TMP223]], align 4
23145 // CHECK12-NEXT:    [[TMP224:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 1
23146 // CHECK12-NEXT:    [[TMP225:%.*]] = bitcast i8** [[TMP224]] to i32*
23147 // CHECK12-NEXT:    store i32 [[TMP215]], i32* [[TMP225]], align 4
23148 // CHECK12-NEXT:    [[TMP226:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 1
23149 // CHECK12-NEXT:    [[TMP227:%.*]] = bitcast i8** [[TMP226]] to i32*
23150 // CHECK12-NEXT:    store i32 [[TMP215]], i32* [[TMP227]], align 4
23151 // CHECK12-NEXT:    [[TMP228:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS73]], i32 0, i32 1
23152 // CHECK12-NEXT:    store i8* null, i8** [[TMP228]], align 4
23153 // CHECK12-NEXT:    [[TMP229:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 2
23154 // CHECK12-NEXT:    [[TMP230:%.*]] = bitcast i8** [[TMP229]] to double**
23155 // CHECK12-NEXT:    store double* [[TMP216]], double** [[TMP230]], align 4
23156 // CHECK12-NEXT:    [[TMP231:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 2
23157 // CHECK12-NEXT:    [[TMP232:%.*]] = bitcast i8** [[TMP231]] to double**
23158 // CHECK12-NEXT:    store double* [[TMP216]], double** [[TMP232]], align 4
23159 // CHECK12-NEXT:    [[TMP233:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS73]], i32 0, i32 2
23160 // CHECK12-NEXT:    store i8* null, i8** [[TMP233]], align 4
23161 // CHECK12-NEXT:    [[TMP234:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 3
23162 // CHECK12-NEXT:    [[TMP235:%.*]] = bitcast i8** [[TMP234]] to double**
23163 // CHECK12-NEXT:    store double* [[TMP217]], double** [[TMP235]], align 4
23164 // CHECK12-NEXT:    [[TMP236:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 3
23165 // CHECK12-NEXT:    [[TMP237:%.*]] = bitcast i8** [[TMP236]] to double**
23166 // CHECK12-NEXT:    store double* [[TMP217]], double** [[TMP237]], align 4
23167 // CHECK12-NEXT:    [[TMP238:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS73]], i32 0, i32 3
23168 // CHECK12-NEXT:    store i8* null, i8** [[TMP238]], align 4
23169 // CHECK12-NEXT:    [[TMP239:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 4
23170 // CHECK12-NEXT:    [[TMP240:%.*]] = bitcast i8** [[TMP239]] to double**
23171 // CHECK12-NEXT:    store double* [[TMP218]], double** [[TMP240]], align 4
23172 // CHECK12-NEXT:    [[TMP241:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 4
23173 // CHECK12-NEXT:    [[TMP242:%.*]] = bitcast i8** [[TMP241]] to double**
23174 // CHECK12-NEXT:    store double* [[TMP218]], double** [[TMP242]], align 4
23175 // CHECK12-NEXT:    [[TMP243:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS73]], i32 0, i32 4
23176 // CHECK12-NEXT:    store i8* null, i8** [[TMP243]], align 4
23177 // CHECK12-NEXT:    [[TMP244:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 0
23178 // CHECK12-NEXT:    [[TMP245:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 0
23179 // CHECK12-NEXT:    [[TMP246:%.*]] = load i32, i32* [[N]], align 4
23180 // CHECK12-NEXT:    store i32 [[TMP246]], i32* [[DOTCAPTURE_EXPR_75]], align 4
23181 // CHECK12-NEXT:    [[TMP247:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_75]], align 4
23182 // CHECK12-NEXT:    [[SUB77:%.*]] = sub nsw i32 [[TMP247]], 0
23183 // CHECK12-NEXT:    [[DIV78:%.*]] = sdiv i32 [[SUB77]], 1
23184 // CHECK12-NEXT:    [[SUB79:%.*]] = sub nsw i32 [[DIV78]], 1
23185 // CHECK12-NEXT:    store i32 [[SUB79]], i32* [[DOTCAPTURE_EXPR_76]], align 4
23186 // CHECK12-NEXT:    [[TMP248:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_76]], align 4
23187 // CHECK12-NEXT:    [[ADD80:%.*]] = add nsw i32 [[TMP248]], 1
23188 // CHECK12-NEXT:    [[TMP249:%.*]] = zext i32 [[ADD80]] to i64
23189 // CHECK12-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP249]])
23190 // CHECK12-NEXT:    [[TMP250:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l561.region_id, i32 5, i8** [[TMP244]], i8** [[TMP245]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.24, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.25, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
23191 // CHECK12-NEXT:    [[TMP251:%.*]] = icmp ne i32 [[TMP250]], 0
23192 // CHECK12-NEXT:    br i1 [[TMP251]], label [[OMP_OFFLOAD_FAILED81:%.*]], label [[OMP_OFFLOAD_CONT82:%.*]]
23193 // CHECK12:       omp_offload.failed81:
23194 // CHECK12-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l561(i32 [[TMP213]], i32 [[TMP215]], double* [[TMP216]], double* [[TMP217]], double* [[TMP218]]) #[[ATTR2]]
23195 // CHECK12-NEXT:    br label [[OMP_OFFLOAD_CONT82]]
23196 // CHECK12:       omp_offload.cont82:
23197 // CHECK12-NEXT:    [[CALL:%.*]] = call i32 @_Z5tmainIiET_v()
23198 // CHECK12-NEXT:    ret i32 [[CALL]]
23199 //
23200 //
23201 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l368
23202 // CHECK12-SAME: (i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1:[0-9]+]] {
23203 // CHECK12-NEXT:  entry:
23204 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
23205 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
23206 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
23207 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
23208 // CHECK12-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
23209 // CHECK12-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
23210 // CHECK12-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
23211 // CHECK12-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
23212 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
23213 // CHECK12-NEXT:    ret void
23214 //
23215 //
23216 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined.
23217 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
23218 // CHECK12-NEXT:  entry:
23219 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
23220 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
23221 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
23222 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
23223 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
23224 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
23225 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
23226 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
23227 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
23228 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
23229 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
23230 // CHECK12-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
23231 // CHECK12-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
23232 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
23233 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
23234 // CHECK12-NEXT:    [[I3:%.*]] = alloca i32, align 4
23235 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
23236 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
23237 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
23238 // CHECK12-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
23239 // CHECK12-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
23240 // CHECK12-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
23241 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
23242 // CHECK12-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
23243 // CHECK12-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
23244 // CHECK12-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
23245 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
23246 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
23247 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23248 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
23249 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
23250 // CHECK12-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
23251 // CHECK12-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
23252 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
23253 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23254 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
23255 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
23256 // CHECK12:       omp.precond.then:
23257 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
23258 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23259 // CHECK12-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
23260 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
23261 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
23262 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
23263 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
23264 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
23265 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
23266 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23267 // CHECK12-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
23268 // CHECK12-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
23269 // CHECK12:       cond.true:
23270 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23271 // CHECK12-NEXT:    br label [[COND_END:%.*]]
23272 // CHECK12:       cond.false:
23273 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
23274 // CHECK12-NEXT:    br label [[COND_END]]
23275 // CHECK12:       cond.end:
23276 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
23277 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
23278 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
23279 // CHECK12-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
23280 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
23281 // CHECK12:       omp.inner.for.cond:
23282 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
23283 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !18
23284 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
23285 // CHECK12-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
23286 // CHECK12:       omp.inner.for.body:
23287 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !18
23288 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !18
23289 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !18
23290 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
23291 // CHECK12:       omp.inner.for.inc:
23292 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
23293 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !18
23294 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
23295 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
23296 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
23297 // CHECK12:       omp.inner.for.end:
23298 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
23299 // CHECK12:       omp.loop.exit:
23300 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
23301 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
23302 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
23303 // CHECK12-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
23304 // CHECK12-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
23305 // CHECK12-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
23306 // CHECK12:       .omp.final.then:
23307 // CHECK12-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23308 // CHECK12-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
23309 // CHECK12-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
23310 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
23311 // CHECK12-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
23312 // CHECK12-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
23313 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
23314 // CHECK12:       .omp.final.done:
23315 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
23316 // CHECK12:       omp.precond.end:
23317 // CHECK12-NEXT:    ret void
23318 //
23319 //
23320 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..1
23321 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
23322 // CHECK12-NEXT:  entry:
23323 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
23324 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
23325 // CHECK12-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
23326 // CHECK12-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
23327 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
23328 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
23329 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
23330 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
23331 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
23332 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
23333 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
23334 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
23335 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
23336 // CHECK12-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
23337 // CHECK12-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
23338 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
23339 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
23340 // CHECK12-NEXT:    [[I3:%.*]] = alloca i32, align 4
23341 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
23342 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
23343 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
23344 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
23345 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
23346 // CHECK12-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
23347 // CHECK12-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
23348 // CHECK12-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
23349 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
23350 // CHECK12-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
23351 // CHECK12-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
23352 // CHECK12-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
23353 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
23354 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
23355 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23356 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
23357 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
23358 // CHECK12-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
23359 // CHECK12-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
23360 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
23361 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23362 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
23363 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
23364 // CHECK12:       omp.precond.then:
23365 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
23366 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23367 // CHECK12-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
23368 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
23369 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
23370 // CHECK12-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
23371 // CHECK12-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
23372 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
23373 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
23374 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
23375 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
23376 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
23377 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
23378 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23379 // CHECK12-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
23380 // CHECK12-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
23381 // CHECK12:       cond.true:
23382 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23383 // CHECK12-NEXT:    br label [[COND_END:%.*]]
23384 // CHECK12:       cond.false:
23385 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
23386 // CHECK12-NEXT:    br label [[COND_END]]
23387 // CHECK12:       cond.end:
23388 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
23389 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
23390 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
23391 // CHECK12-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
23392 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
23393 // CHECK12:       omp.inner.for.cond:
23394 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
23395 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !22
23396 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
23397 // CHECK12-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
23398 // CHECK12:       omp.inner.for.body:
23399 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
23400 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
23401 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
23402 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !22
23403 // CHECK12-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !22
23404 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !22
23405 // CHECK12-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i32 [[TMP21]]
23406 // CHECK12-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !22
23407 // CHECK12-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !22
23408 // CHECK12-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !22
23409 // CHECK12-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
23410 // CHECK12-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !22
23411 // CHECK12-NEXT:    [[ADD7:%.*]] = fadd double [[TMP22]], [[TMP25]]
23412 // CHECK12-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !22
23413 // CHECK12-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !22
23414 // CHECK12-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
23415 // CHECK12-NEXT:    store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !22
23416 // CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
23417 // CHECK12:       omp.body.continue:
23418 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
23419 // CHECK12:       omp.inner.for.inc:
23420 // CHECK12-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
23421 // CHECK12-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP28]], 1
23422 // CHECK12-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
23423 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]]
23424 // CHECK12:       omp.inner.for.end:
23425 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
23426 // CHECK12:       omp.loop.exit:
23427 // CHECK12-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
23428 // CHECK12-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
23429 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
23430 // CHECK12-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
23431 // CHECK12-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
23432 // CHECK12-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
23433 // CHECK12:       .omp.final.then:
23434 // CHECK12-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23435 // CHECK12-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
23436 // CHECK12-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
23437 // CHECK12-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
23438 // CHECK12-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
23439 // CHECK12-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
23440 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
23441 // CHECK12:       .omp.final.done:
23442 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
23443 // CHECK12:       omp.precond.end:
23444 // CHECK12-NEXT:    ret void
23445 //
23446 //
23447 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l407
23448 // CHECK12-SAME: (i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1]] {
23449 // CHECK12-NEXT:  entry:
23450 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
23451 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
23452 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
23453 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
23454 // CHECK12-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
23455 // CHECK12-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
23456 // CHECK12-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
23457 // CHECK12-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
23458 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
23459 // CHECK12-NEXT:    ret void
23460 //
23461 //
23462 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..2
23463 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
23464 // CHECK12-NEXT:  entry:
23465 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
23466 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
23467 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
23468 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
23469 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
23470 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
23471 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
23472 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
23473 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
23474 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
23475 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
23476 // CHECK12-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
23477 // CHECK12-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
23478 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
23479 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
23480 // CHECK12-NEXT:    [[I3:%.*]] = alloca i32, align 4
23481 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
23482 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
23483 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
23484 // CHECK12-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
23485 // CHECK12-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
23486 // CHECK12-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
23487 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
23488 // CHECK12-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
23489 // CHECK12-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
23490 // CHECK12-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
23491 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
23492 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
23493 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23494 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
23495 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
23496 // CHECK12-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
23497 // CHECK12-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
23498 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
23499 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23500 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
23501 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
23502 // CHECK12:       omp.precond.then:
23503 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
23504 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23505 // CHECK12-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
23506 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
23507 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
23508 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
23509 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
23510 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
23511 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
23512 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23513 // CHECK12-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
23514 // CHECK12-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
23515 // CHECK12:       cond.true:
23516 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23517 // CHECK12-NEXT:    br label [[COND_END:%.*]]
23518 // CHECK12:       cond.false:
23519 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
23520 // CHECK12-NEXT:    br label [[COND_END]]
23521 // CHECK12:       cond.end:
23522 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
23523 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
23524 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
23525 // CHECK12-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
23526 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
23527 // CHECK12:       omp.inner.for.cond:
23528 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
23529 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !27
23530 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
23531 // CHECK12-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
23532 // CHECK12:       omp.inner.for.body:
23533 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !27
23534 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !27
23535 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !27
23536 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
23537 // CHECK12:       omp.inner.for.inc:
23538 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
23539 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !27
23540 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
23541 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
23542 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]]
23543 // CHECK12:       omp.inner.for.end:
23544 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
23545 // CHECK12:       omp.loop.exit:
23546 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
23547 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
23548 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
23549 // CHECK12-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
23550 // CHECK12-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
23551 // CHECK12-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
23552 // CHECK12:       .omp.final.then:
23553 // CHECK12-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23554 // CHECK12-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
23555 // CHECK12-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
23556 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
23557 // CHECK12-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
23558 // CHECK12-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
23559 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
23560 // CHECK12:       .omp.final.done:
23561 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
23562 // CHECK12:       omp.precond.end:
23563 // CHECK12-NEXT:    ret void
23564 //
23565 //
23566 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..3
23567 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
23568 // CHECK12-NEXT:  entry:
23569 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
23570 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
23571 // CHECK12-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
23572 // CHECK12-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
23573 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
23574 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
23575 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
23576 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
23577 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
23578 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
23579 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
23580 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
23581 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
23582 // CHECK12-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
23583 // CHECK12-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
23584 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
23585 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
23586 // CHECK12-NEXT:    [[I3:%.*]] = alloca i32, align 4
23587 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
23588 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
23589 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
23590 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
23591 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
23592 // CHECK12-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
23593 // CHECK12-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
23594 // CHECK12-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
23595 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
23596 // CHECK12-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
23597 // CHECK12-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
23598 // CHECK12-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
23599 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
23600 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
23601 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23602 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
23603 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
23604 // CHECK12-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
23605 // CHECK12-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
23606 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
23607 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23608 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
23609 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
23610 // CHECK12:       omp.precond.then:
23611 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
23612 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23613 // CHECK12-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
23614 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
23615 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
23616 // CHECK12-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
23617 // CHECK12-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
23618 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
23619 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
23620 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
23621 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
23622 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
23623 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
23624 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23625 // CHECK12-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
23626 // CHECK12-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
23627 // CHECK12:       cond.true:
23628 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23629 // CHECK12-NEXT:    br label [[COND_END:%.*]]
23630 // CHECK12:       cond.false:
23631 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
23632 // CHECK12-NEXT:    br label [[COND_END]]
23633 // CHECK12:       cond.end:
23634 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
23635 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
23636 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
23637 // CHECK12-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
23638 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
23639 // CHECK12:       omp.inner.for.cond:
23640 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
23641 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !30
23642 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
23643 // CHECK12-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
23644 // CHECK12:       omp.inner.for.body:
23645 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
23646 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
23647 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
23648 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !30
23649 // CHECK12-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !30
23650 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !30
23651 // CHECK12-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i32 [[TMP21]]
23652 // CHECK12-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !30
23653 // CHECK12-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !30
23654 // CHECK12-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !30
23655 // CHECK12-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
23656 // CHECK12-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !30
23657 // CHECK12-NEXT:    [[ADD7:%.*]] = fadd double [[TMP22]], [[TMP25]]
23658 // CHECK12-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !30
23659 // CHECK12-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !30
23660 // CHECK12-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
23661 // CHECK12-NEXT:    store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !30
23662 // CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
23663 // CHECK12:       omp.body.continue:
23664 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
23665 // CHECK12:       omp.inner.for.inc:
23666 // CHECK12-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
23667 // CHECK12-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP28]], 1
23668 // CHECK12-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
23669 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]]
23670 // CHECK12:       omp.inner.for.end:
23671 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
23672 // CHECK12:       omp.loop.exit:
23673 // CHECK12-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
23674 // CHECK12-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
23675 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
23676 // CHECK12-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
23677 // CHECK12-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
23678 // CHECK12-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
23679 // CHECK12:       .omp.final.then:
23680 // CHECK12-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23681 // CHECK12-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
23682 // CHECK12-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
23683 // CHECK12-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
23684 // CHECK12-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
23685 // CHECK12-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
23686 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
23687 // CHECK12:       .omp.final.done:
23688 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
23689 // CHECK12:       omp.precond.end:
23690 // CHECK12-NEXT:    ret void
23691 //
23692 //
23693 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l446
23694 // CHECK12-SAME: (i32 [[CH:%.*]], i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1]] {
23695 // CHECK12-NEXT:  entry:
23696 // CHECK12-NEXT:    [[CH_ADDR:%.*]] = alloca i32, align 4
23697 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
23698 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
23699 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
23700 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
23701 // CHECK12-NEXT:    store i32 [[CH]], i32* [[CH_ADDR]], align 4
23702 // CHECK12-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
23703 // CHECK12-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
23704 // CHECK12-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
23705 // CHECK12-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
23706 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
23707 // CHECK12-NEXT:    ret void
23708 //
23709 //
23710 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..6
23711 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
23712 // CHECK12-NEXT:  entry:
23713 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
23714 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
23715 // CHECK12-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 4
23716 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
23717 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
23718 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
23719 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
23720 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
23721 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
23722 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
23723 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
23724 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
23725 // CHECK12-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
23726 // CHECK12-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
23727 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
23728 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
23729 // CHECK12-NEXT:    [[I3:%.*]] = alloca i32, align 4
23730 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
23731 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
23732 // CHECK12-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 4
23733 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
23734 // CHECK12-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
23735 // CHECK12-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
23736 // CHECK12-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
23737 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
23738 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
23739 // CHECK12-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 4
23740 // CHECK12-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 4
23741 // CHECK12-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 4
23742 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP1]], align 4
23743 // CHECK12-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
23744 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23745 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP6]], 0
23746 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
23747 // CHECK12-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
23748 // CHECK12-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
23749 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
23750 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23751 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP7]]
23752 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
23753 // CHECK12:       omp.precond.then:
23754 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
23755 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23756 // CHECK12-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_COMB_UB]], align 4
23757 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
23758 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
23759 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP0]], align 4
23760 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
23761 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
23762 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP9]])
23763 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
23764 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23765 // CHECK12-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
23766 // CHECK12-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
23767 // CHECK12:       cond.true:
23768 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23769 // CHECK12-NEXT:    br label [[COND_END:%.*]]
23770 // CHECK12:       cond.false:
23771 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
23772 // CHECK12-NEXT:    br label [[COND_END]]
23773 // CHECK12:       cond.end:
23774 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
23775 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
23776 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
23777 // CHECK12-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
23778 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
23779 // CHECK12:       omp.inner.for.cond:
23780 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
23781 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !33
23782 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], 1
23783 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP17]], [[ADD]]
23784 // CHECK12-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
23785 // CHECK12:       omp.inner.for.body:
23786 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !33
23787 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !33
23788 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]]), !llvm.access.group !33
23789 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
23790 // CHECK12:       omp.inner.for.inc:
23791 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
23792 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !33
23793 // CHECK12-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
23794 // CHECK12-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
23795 // CHECK12-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !33
23796 // CHECK12-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !33
23797 // CHECK12-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
23798 // CHECK12-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !33
23799 // CHECK12-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !33
23800 // CHECK12-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !33
23801 // CHECK12-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
23802 // CHECK12-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !33
23803 // CHECK12-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !33
23804 // CHECK12-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !33
23805 // CHECK12-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[TMP27]], [[TMP28]]
23806 // CHECK12-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
23807 // CHECK12:       cond.true10:
23808 // CHECK12-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !33
23809 // CHECK12-NEXT:    br label [[COND_END12:%.*]]
23810 // CHECK12:       cond.false11:
23811 // CHECK12-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !33
23812 // CHECK12-NEXT:    br label [[COND_END12]]
23813 // CHECK12:       cond.end12:
23814 // CHECK12-NEXT:    [[COND13:%.*]] = phi i32 [ [[TMP29]], [[COND_TRUE10]] ], [ [[TMP30]], [[COND_FALSE11]] ]
23815 // CHECK12-NEXT:    store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !33
23816 // CHECK12-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !33
23817 // CHECK12-NEXT:    store i32 [[TMP31]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
23818 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]]
23819 // CHECK12:       omp.inner.for.end:
23820 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
23821 // CHECK12:       omp.loop.exit:
23822 // CHECK12-NEXT:    [[TMP32:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
23823 // CHECK12-NEXT:    [[TMP33:%.*]] = load i32, i32* [[TMP32]], align 4
23824 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP33]])
23825 // CHECK12-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
23826 // CHECK12-NEXT:    [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
23827 // CHECK12-NEXT:    br i1 [[TMP35]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
23828 // CHECK12:       .omp.final.then:
23829 // CHECK12-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23830 // CHECK12-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP36]], 0
23831 // CHECK12-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
23832 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV15]], 1
23833 // CHECK12-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL]]
23834 // CHECK12-NEXT:    store i32 [[ADD16]], i32* [[I3]], align 4
23835 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
23836 // CHECK12:       .omp.final.done:
23837 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
23838 // CHECK12:       omp.precond.end:
23839 // CHECK12-NEXT:    ret void
23840 //
23841 //
23842 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..7
23843 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
23844 // CHECK12-NEXT:  entry:
23845 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
23846 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
23847 // CHECK12-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
23848 // CHECK12-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
23849 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
23850 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
23851 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
23852 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
23853 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
23854 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
23855 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
23856 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
23857 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
23858 // CHECK12-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
23859 // CHECK12-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
23860 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
23861 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
23862 // CHECK12-NEXT:    [[I3:%.*]] = alloca i32, align 4
23863 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
23864 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
23865 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
23866 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
23867 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
23868 // CHECK12-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
23869 // CHECK12-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
23870 // CHECK12-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
23871 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
23872 // CHECK12-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
23873 // CHECK12-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
23874 // CHECK12-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
23875 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
23876 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
23877 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23878 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
23879 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
23880 // CHECK12-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
23881 // CHECK12-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
23882 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
23883 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23884 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
23885 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
23886 // CHECK12:       omp.precond.then:
23887 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
23888 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23889 // CHECK12-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
23890 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
23891 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
23892 // CHECK12-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
23893 // CHECK12-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
23894 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
23895 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
23896 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
23897 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
23898 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
23899 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
23900 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23901 // CHECK12-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
23902 // CHECK12-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
23903 // CHECK12:       cond.true:
23904 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
23905 // CHECK12-NEXT:    br label [[COND_END:%.*]]
23906 // CHECK12:       cond.false:
23907 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
23908 // CHECK12-NEXT:    br label [[COND_END]]
23909 // CHECK12:       cond.end:
23910 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
23911 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
23912 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
23913 // CHECK12-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
23914 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
23915 // CHECK12:       omp.inner.for.cond:
23916 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
23917 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !36
23918 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
23919 // CHECK12-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
23920 // CHECK12:       omp.inner.for.body:
23921 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
23922 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
23923 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
23924 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !36
23925 // CHECK12-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !36
23926 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !36
23927 // CHECK12-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i32 [[TMP21]]
23928 // CHECK12-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !36
23929 // CHECK12-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !36
23930 // CHECK12-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !36
23931 // CHECK12-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
23932 // CHECK12-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !36
23933 // CHECK12-NEXT:    [[ADD7:%.*]] = fadd double [[TMP22]], [[TMP25]]
23934 // CHECK12-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !36
23935 // CHECK12-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !36
23936 // CHECK12-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
23937 // CHECK12-NEXT:    store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !36
23938 // CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
23939 // CHECK12:       omp.body.continue:
23940 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
23941 // CHECK12:       omp.inner.for.inc:
23942 // CHECK12-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
23943 // CHECK12-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP28]], 1
23944 // CHECK12-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
23945 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP37:![0-9]+]]
23946 // CHECK12:       omp.inner.for.end:
23947 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
23948 // CHECK12:       omp.loop.exit:
23949 // CHECK12-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
23950 // CHECK12-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
23951 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
23952 // CHECK12-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
23953 // CHECK12-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
23954 // CHECK12-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
23955 // CHECK12:       .omp.final.then:
23956 // CHECK12-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23957 // CHECK12-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
23958 // CHECK12-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
23959 // CHECK12-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
23960 // CHECK12-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
23961 // CHECK12-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
23962 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
23963 // CHECK12:       .omp.final.done:
23964 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
23965 // CHECK12:       omp.precond.end:
23966 // CHECK12-NEXT:    ret void
23967 //
23968 //
23969 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l477
23970 // CHECK12-SAME: (i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1]] {
23971 // CHECK12-NEXT:  entry:
23972 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
23973 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
23974 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
23975 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
23976 // CHECK12-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
23977 // CHECK12-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
23978 // CHECK12-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
23979 // CHECK12-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
23980 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
23981 // CHECK12-NEXT:    ret void
23982 //
23983 //
23984 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..10
23985 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
23986 // CHECK12-NEXT:  entry:
23987 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
23988 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
23989 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
23990 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
23991 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
23992 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
23993 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
23994 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
23995 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
23996 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
23997 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
23998 // CHECK12-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
23999 // CHECK12-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
24000 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
24001 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
24002 // CHECK12-NEXT:    [[I3:%.*]] = alloca i32, align 4
24003 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
24004 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
24005 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
24006 // CHECK12-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
24007 // CHECK12-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
24008 // CHECK12-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
24009 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
24010 // CHECK12-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
24011 // CHECK12-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
24012 // CHECK12-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
24013 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
24014 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
24015 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
24016 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
24017 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
24018 // CHECK12-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
24019 // CHECK12-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
24020 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
24021 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
24022 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
24023 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
24024 // CHECK12:       omp.precond.then:
24025 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
24026 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24027 // CHECK12-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
24028 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
24029 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
24030 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24031 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
24032 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
24033 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
24034 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24035 // CHECK12-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
24036 // CHECK12-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
24037 // CHECK12:       cond.true:
24038 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24039 // CHECK12-NEXT:    br label [[COND_END:%.*]]
24040 // CHECK12:       cond.false:
24041 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
24042 // CHECK12-NEXT:    br label [[COND_END]]
24043 // CHECK12:       cond.end:
24044 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
24045 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
24046 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
24047 // CHECK12-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
24048 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
24049 // CHECK12:       omp.inner.for.cond:
24050 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39
24051 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !39
24052 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
24053 // CHECK12-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
24054 // CHECK12:       omp.inner.for.body:
24055 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !39
24056 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !39
24057 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !39
24058 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
24059 // CHECK12:       omp.inner.for.inc:
24060 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39
24061 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !39
24062 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
24063 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39
24064 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP40:![0-9]+]]
24065 // CHECK12:       omp.inner.for.end:
24066 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
24067 // CHECK12:       omp.loop.exit:
24068 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24069 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
24070 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
24071 // CHECK12-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
24072 // CHECK12-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
24073 // CHECK12-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
24074 // CHECK12:       .omp.final.then:
24075 // CHECK12-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
24076 // CHECK12-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
24077 // CHECK12-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
24078 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
24079 // CHECK12-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
24080 // CHECK12-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
24081 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
24082 // CHECK12:       .omp.final.done:
24083 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
24084 // CHECK12:       omp.precond.end:
24085 // CHECK12-NEXT:    ret void
24086 //
24087 //
24088 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..11
24089 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
24090 // CHECK12-NEXT:  entry:
24091 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
24092 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
24093 // CHECK12-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
24094 // CHECK12-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
24095 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
24096 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
24097 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
24098 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
24099 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
24100 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
24101 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
24102 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
24103 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
24104 // CHECK12-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
24105 // CHECK12-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
24106 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
24107 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
24108 // CHECK12-NEXT:    [[I3:%.*]] = alloca i32, align 4
24109 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
24110 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
24111 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
24112 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
24113 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
24114 // CHECK12-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
24115 // CHECK12-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
24116 // CHECK12-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
24117 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
24118 // CHECK12-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
24119 // CHECK12-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
24120 // CHECK12-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
24121 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
24122 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
24123 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
24124 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
24125 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
24126 // CHECK12-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
24127 // CHECK12-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
24128 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
24129 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
24130 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
24131 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
24132 // CHECK12:       omp.precond.then:
24133 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
24134 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24135 // CHECK12-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
24136 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
24137 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
24138 // CHECK12-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
24139 // CHECK12-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
24140 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
24141 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
24142 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24143 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
24144 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
24145 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
24146 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24147 // CHECK12-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
24148 // CHECK12-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
24149 // CHECK12:       cond.true:
24150 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24151 // CHECK12-NEXT:    br label [[COND_END:%.*]]
24152 // CHECK12:       cond.false:
24153 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
24154 // CHECK12-NEXT:    br label [[COND_END]]
24155 // CHECK12:       cond.end:
24156 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
24157 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
24158 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
24159 // CHECK12-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
24160 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
24161 // CHECK12:       omp.inner.for.cond:
24162 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !42
24163 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !42
24164 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
24165 // CHECK12-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
24166 // CHECK12:       omp.inner.for.body:
24167 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !42
24168 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
24169 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
24170 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !42
24171 // CHECK12-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !42
24172 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !42
24173 // CHECK12-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i32 [[TMP21]]
24174 // CHECK12-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !42
24175 // CHECK12-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !42
24176 // CHECK12-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !42
24177 // CHECK12-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
24178 // CHECK12-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !42
24179 // CHECK12-NEXT:    [[ADD7:%.*]] = fadd double [[TMP22]], [[TMP25]]
24180 // CHECK12-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !42
24181 // CHECK12-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !42
24182 // CHECK12-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
24183 // CHECK12-NEXT:    store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !42
24184 // CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
24185 // CHECK12:       omp.body.continue:
24186 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
24187 // CHECK12:       omp.inner.for.inc:
24188 // CHECK12-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !42
24189 // CHECK12-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP28]], 1
24190 // CHECK12-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !42
24191 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP43:![0-9]+]]
24192 // CHECK12:       omp.inner.for.end:
24193 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
24194 // CHECK12:       omp.loop.exit:
24195 // CHECK12-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24196 // CHECK12-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
24197 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
24198 // CHECK12-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
24199 // CHECK12-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
24200 // CHECK12-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
24201 // CHECK12:       .omp.final.then:
24202 // CHECK12-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
24203 // CHECK12-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
24204 // CHECK12-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
24205 // CHECK12-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
24206 // CHECK12-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
24207 // CHECK12-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
24208 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
24209 // CHECK12:       .omp.final.done:
24210 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
24211 // CHECK12:       omp.precond.end:
24212 // CHECK12-NEXT:    ret void
24213 //
24214 //
24215 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l505
24216 // CHECK12-SAME: (i32 [[CH:%.*]], i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1]] {
24217 // CHECK12-NEXT:  entry:
24218 // CHECK12-NEXT:    [[CH_ADDR:%.*]] = alloca i32, align 4
24219 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
24220 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
24221 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
24222 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
24223 // CHECK12-NEXT:    store i32 [[CH]], i32* [[CH_ADDR]], align 4
24224 // CHECK12-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
24225 // CHECK12-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
24226 // CHECK12-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
24227 // CHECK12-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
24228 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
24229 // CHECK12-NEXT:    ret void
24230 //
24231 //
24232 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..14
24233 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
24234 // CHECK12-NEXT:  entry:
24235 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
24236 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
24237 // CHECK12-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 4
24238 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
24239 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
24240 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
24241 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
24242 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
24243 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
24244 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
24245 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
24246 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
24247 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
24248 // CHECK12-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
24249 // CHECK12-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
24250 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
24251 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
24252 // CHECK12-NEXT:    [[I4:%.*]] = alloca i32, align 4
24253 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
24254 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
24255 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
24256 // CHECK12-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 4
24257 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
24258 // CHECK12-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
24259 // CHECK12-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
24260 // CHECK12-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
24261 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
24262 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
24263 // CHECK12-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 4
24264 // CHECK12-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 4
24265 // CHECK12-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 4
24266 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
24267 // CHECK12-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
24268 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
24269 // CHECK12-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
24270 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24271 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
24272 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
24273 // CHECK12-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
24274 // CHECK12-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
24275 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
24276 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24277 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
24278 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
24279 // CHECK12:       omp.precond.then:
24280 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
24281 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
24282 // CHECK12-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
24283 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
24284 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
24285 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24286 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
24287 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
24288 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
24289 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
24290 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
24291 // CHECK12-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
24292 // CHECK12:       cond.true:
24293 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
24294 // CHECK12-NEXT:    br label [[COND_END:%.*]]
24295 // CHECK12:       cond.false:
24296 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
24297 // CHECK12-NEXT:    br label [[COND_END]]
24298 // CHECK12:       cond.end:
24299 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
24300 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
24301 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
24302 // CHECK12-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
24303 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
24304 // CHECK12:       omp.inner.for.cond:
24305 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !45
24306 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !45
24307 // CHECK12-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
24308 // CHECK12-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
24309 // CHECK12:       omp.inner.for.body:
24310 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !45
24311 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !45
24312 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !45
24313 // CHECK12-NEXT:    store i32 [[TMP21]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !45
24314 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !45
24315 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**, i32)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]], i32 [[TMP22]]), !llvm.access.group !45
24316 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
24317 // CHECK12:       omp.inner.for.inc:
24318 // CHECK12-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !45
24319 // CHECK12-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !45
24320 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
24321 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !45
24322 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP46:![0-9]+]]
24323 // CHECK12:       omp.inner.for.end:
24324 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
24325 // CHECK12:       omp.loop.exit:
24326 // CHECK12-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24327 // CHECK12-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
24328 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
24329 // CHECK12-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
24330 // CHECK12-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
24331 // CHECK12-NEXT:    br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
24332 // CHECK12:       .omp.final.then:
24333 // CHECK12-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24334 // CHECK12-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP29]], 0
24335 // CHECK12-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
24336 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
24337 // CHECK12-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
24338 // CHECK12-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
24339 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
24340 // CHECK12:       .omp.final.done:
24341 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
24342 // CHECK12:       omp.precond.end:
24343 // CHECK12-NEXT:    ret void
24344 //
24345 //
24346 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..15
24347 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
24348 // CHECK12-NEXT:  entry:
24349 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
24350 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
24351 // CHECK12-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
24352 // CHECK12-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
24353 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
24354 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
24355 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
24356 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
24357 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
24358 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
24359 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
24360 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
24361 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
24362 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
24363 // CHECK12-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
24364 // CHECK12-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
24365 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
24366 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
24367 // CHECK12-NEXT:    [[I4:%.*]] = alloca i32, align 4
24368 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
24369 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
24370 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
24371 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
24372 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
24373 // CHECK12-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
24374 // CHECK12-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
24375 // CHECK12-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
24376 // CHECK12-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
24377 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
24378 // CHECK12-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
24379 // CHECK12-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
24380 // CHECK12-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
24381 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
24382 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
24383 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24384 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
24385 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
24386 // CHECK12-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
24387 // CHECK12-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
24388 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
24389 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24390 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
24391 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
24392 // CHECK12:       omp.precond.then:
24393 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
24394 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
24395 // CHECK12-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
24396 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
24397 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
24398 // CHECK12-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
24399 // CHECK12-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
24400 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
24401 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
24402 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
24403 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24404 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
24405 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP12]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP10]])
24406 // CHECK12-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
24407 // CHECK12:       omp.dispatch.cond:
24408 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
24409 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
24410 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp ugt i32 [[TMP13]], [[TMP14]]
24411 // CHECK12-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
24412 // CHECK12:       cond.true:
24413 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
24414 // CHECK12-NEXT:    br label [[COND_END:%.*]]
24415 // CHECK12:       cond.false:
24416 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
24417 // CHECK12-NEXT:    br label [[COND_END]]
24418 // CHECK12:       cond.end:
24419 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP15]], [[COND_TRUE]] ], [ [[TMP16]], [[COND_FALSE]] ]
24420 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
24421 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
24422 // CHECK12-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
24423 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
24424 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
24425 // CHECK12-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
24426 // CHECK12-NEXT:    br i1 [[CMP6]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
24427 // CHECK12:       omp.dispatch.body:
24428 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
24429 // CHECK12:       omp.inner.for.cond:
24430 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !48
24431 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !48
24432 // CHECK12-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
24433 // CHECK12-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
24434 // CHECK12:       omp.inner.for.body:
24435 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !48
24436 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP22]], 1
24437 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
24438 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !48
24439 // CHECK12-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !48
24440 // CHECK12-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !48
24441 // CHECK12-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
24442 // CHECK12-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !48
24443 // CHECK12-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !48
24444 // CHECK12-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !48
24445 // CHECK12-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
24446 // CHECK12-NEXT:    [[TMP28:%.*]] = load double, double* [[ARRAYIDX8]], align 4, !llvm.access.group !48
24447 // CHECK12-NEXT:    [[ADD9:%.*]] = fadd double [[TMP25]], [[TMP28]]
24448 // CHECK12-NEXT:    [[TMP29:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !48
24449 // CHECK12-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !48
24450 // CHECK12-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds double, double* [[TMP29]], i32 [[TMP30]]
24451 // CHECK12-NEXT:    store double [[ADD9]], double* [[ARRAYIDX10]], align 4, !llvm.access.group !48
24452 // CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
24453 // CHECK12:       omp.body.continue:
24454 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
24455 // CHECK12:       omp.inner.for.inc:
24456 // CHECK12-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !48
24457 // CHECK12-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP31]], 1
24458 // CHECK12-NEXT:    store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !48
24459 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP49:![0-9]+]]
24460 // CHECK12:       omp.inner.for.end:
24461 // CHECK12-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
24462 // CHECK12:       omp.dispatch.inc:
24463 // CHECK12-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
24464 // CHECK12-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
24465 // CHECK12-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP32]], [[TMP33]]
24466 // CHECK12-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_LB]], align 4
24467 // CHECK12-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
24468 // CHECK12-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
24469 // CHECK12-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP34]], [[TMP35]]
24470 // CHECK12-NEXT:    store i32 [[ADD13]], i32* [[DOTOMP_UB]], align 4
24471 // CHECK12-NEXT:    br label [[OMP_DISPATCH_COND]]
24472 // CHECK12:       omp.dispatch.end:
24473 // CHECK12-NEXT:    [[TMP36:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24474 // CHECK12-NEXT:    [[TMP37:%.*]] = load i32, i32* [[TMP36]], align 4
24475 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP37]])
24476 // CHECK12-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
24477 // CHECK12-NEXT:    [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
24478 // CHECK12-NEXT:    br i1 [[TMP39]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
24479 // CHECK12:       .omp.final.then:
24480 // CHECK12-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24481 // CHECK12-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP40]], 0
24482 // CHECK12-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
24483 // CHECK12-NEXT:    [[MUL16:%.*]] = mul nsw i32 [[DIV15]], 1
24484 // CHECK12-NEXT:    [[ADD17:%.*]] = add nsw i32 0, [[MUL16]]
24485 // CHECK12-NEXT:    store i32 [[ADD17]], i32* [[I4]], align 4
24486 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
24487 // CHECK12:       .omp.final.done:
24488 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
24489 // CHECK12:       omp.precond.end:
24490 // CHECK12-NEXT:    ret void
24491 //
24492 //
24493 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l535
24494 // CHECK12-SAME: (i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1]] {
24495 // CHECK12-NEXT:  entry:
24496 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
24497 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
24498 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
24499 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
24500 // CHECK12-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
24501 // CHECK12-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
24502 // CHECK12-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
24503 // CHECK12-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
24504 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
24505 // CHECK12-NEXT:    ret void
24506 //
24507 //
24508 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..18
24509 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
24510 // CHECK12-NEXT:  entry:
24511 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
24512 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
24513 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
24514 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
24515 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
24516 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
24517 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
24518 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
24519 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
24520 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
24521 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
24522 // CHECK12-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
24523 // CHECK12-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
24524 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
24525 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
24526 // CHECK12-NEXT:    [[I3:%.*]] = alloca i32, align 4
24527 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
24528 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
24529 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
24530 // CHECK12-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
24531 // CHECK12-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
24532 // CHECK12-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
24533 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
24534 // CHECK12-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
24535 // CHECK12-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
24536 // CHECK12-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
24537 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
24538 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
24539 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
24540 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
24541 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
24542 // CHECK12-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
24543 // CHECK12-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
24544 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
24545 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
24546 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
24547 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
24548 // CHECK12:       omp.precond.then:
24549 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
24550 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24551 // CHECK12-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
24552 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
24553 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
24554 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24555 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
24556 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
24557 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
24558 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24559 // CHECK12-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
24560 // CHECK12-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
24561 // CHECK12:       cond.true:
24562 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24563 // CHECK12-NEXT:    br label [[COND_END:%.*]]
24564 // CHECK12:       cond.false:
24565 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
24566 // CHECK12-NEXT:    br label [[COND_END]]
24567 // CHECK12:       cond.end:
24568 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
24569 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
24570 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
24571 // CHECK12-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
24572 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
24573 // CHECK12:       omp.inner.for.cond:
24574 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !51
24575 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !51
24576 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
24577 // CHECK12-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
24578 // CHECK12:       omp.inner.for.body:
24579 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !51
24580 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !51
24581 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !51
24582 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
24583 // CHECK12:       omp.inner.for.inc:
24584 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !51
24585 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !51
24586 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
24587 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !51
24588 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP52:![0-9]+]]
24589 // CHECK12:       omp.inner.for.end:
24590 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
24591 // CHECK12:       omp.loop.exit:
24592 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24593 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
24594 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
24595 // CHECK12-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
24596 // CHECK12-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
24597 // CHECK12-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
24598 // CHECK12:       .omp.final.then:
24599 // CHECK12-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
24600 // CHECK12-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
24601 // CHECK12-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
24602 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
24603 // CHECK12-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
24604 // CHECK12-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
24605 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
24606 // CHECK12:       .omp.final.done:
24607 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
24608 // CHECK12:       omp.precond.end:
24609 // CHECK12-NEXT:    ret void
24610 //
24611 //
24612 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..19
24613 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
24614 // CHECK12-NEXT:  entry:
24615 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
24616 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
24617 // CHECK12-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
24618 // CHECK12-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
24619 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
24620 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
24621 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
24622 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
24623 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
24624 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
24625 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
24626 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
24627 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
24628 // CHECK12-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
24629 // CHECK12-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
24630 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
24631 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
24632 // CHECK12-NEXT:    [[I3:%.*]] = alloca i32, align 4
24633 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
24634 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
24635 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
24636 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
24637 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
24638 // CHECK12-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
24639 // CHECK12-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
24640 // CHECK12-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
24641 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
24642 // CHECK12-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
24643 // CHECK12-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
24644 // CHECK12-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
24645 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
24646 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
24647 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
24648 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
24649 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
24650 // CHECK12-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
24651 // CHECK12-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
24652 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
24653 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
24654 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
24655 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
24656 // CHECK12:       omp.precond.then:
24657 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
24658 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24659 // CHECK12-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
24660 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
24661 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
24662 // CHECK12-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
24663 // CHECK12-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
24664 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
24665 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
24666 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
24667 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
24668 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24669 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
24670 // CHECK12-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32 35, i32 [[TMP10]], i32 [[TMP11]], i32 1, i32 1)
24671 // CHECK12-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
24672 // CHECK12:       omp.dispatch.cond:
24673 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24674 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4
24675 // CHECK12-NEXT:    [[TMP16:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP15]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
24676 // CHECK12-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP16]], 0
24677 // CHECK12-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
24678 // CHECK12:       omp.dispatch.body:
24679 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
24680 // CHECK12-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
24681 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
24682 // CHECK12:       omp.inner.for.cond:
24683 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !54
24684 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !54
24685 // CHECK12-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
24686 // CHECK12-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
24687 // CHECK12:       omp.inner.for.body:
24688 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !54
24689 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP20]], 1
24690 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
24691 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !54
24692 // CHECK12-NEXT:    [[TMP21:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !54
24693 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !54
24694 // CHECK12-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP21]], i32 [[TMP22]]
24695 // CHECK12-NEXT:    [[TMP23:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !54
24696 // CHECK12-NEXT:    [[TMP24:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !54
24697 // CHECK12-NEXT:    [[TMP25:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !54
24698 // CHECK12-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds double, double* [[TMP24]], i32 [[TMP25]]
24699 // CHECK12-NEXT:    [[TMP26:%.*]] = load double, double* [[ARRAYIDX5]], align 4, !llvm.access.group !54
24700 // CHECK12-NEXT:    [[ADD6:%.*]] = fadd double [[TMP23]], [[TMP26]]
24701 // CHECK12-NEXT:    [[TMP27:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !54
24702 // CHECK12-NEXT:    [[TMP28:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !54
24703 // CHECK12-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds double, double* [[TMP27]], i32 [[TMP28]]
24704 // CHECK12-NEXT:    store double [[ADD6]], double* [[ARRAYIDX7]], align 4, !llvm.access.group !54
24705 // CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
24706 // CHECK12:       omp.body.continue:
24707 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
24708 // CHECK12:       omp.inner.for.inc:
24709 // CHECK12-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !54
24710 // CHECK12-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP29]], 1
24711 // CHECK12-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !54
24712 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP55:![0-9]+]]
24713 // CHECK12:       omp.inner.for.end:
24714 // CHECK12-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
24715 // CHECK12:       omp.dispatch.inc:
24716 // CHECK12-NEXT:    br label [[OMP_DISPATCH_COND]]
24717 // CHECK12:       omp.dispatch.end:
24718 // CHECK12-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
24719 // CHECK12-NEXT:    [[TMP31:%.*]] = icmp ne i32 [[TMP30]], 0
24720 // CHECK12-NEXT:    br i1 [[TMP31]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
24721 // CHECK12:       .omp.final.then:
24722 // CHECK12-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
24723 // CHECK12-NEXT:    [[SUB9:%.*]] = sub nsw i32 [[TMP32]], 0
24724 // CHECK12-NEXT:    [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
24725 // CHECK12-NEXT:    [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
24726 // CHECK12-NEXT:    [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
24727 // CHECK12-NEXT:    store i32 [[ADD12]], i32* [[I3]], align 4
24728 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
24729 // CHECK12:       .omp.final.done:
24730 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
24731 // CHECK12:       omp.precond.end:
24732 // CHECK12-NEXT:    ret void
24733 //
24734 //
24735 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l561
24736 // CHECK12-SAME: (i32 [[CH:%.*]], i32 [[N:%.*]], double* [[A:%.*]], double* [[B:%.*]], double* [[C:%.*]]) #[[ATTR1]] {
24737 // CHECK12-NEXT:  entry:
24738 // CHECK12-NEXT:    [[CH_ADDR:%.*]] = alloca i32, align 4
24739 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
24740 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
24741 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
24742 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
24743 // CHECK12-NEXT:    store i32 [[CH]], i32* [[CH_ADDR]], align 4
24744 // CHECK12-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
24745 // CHECK12-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
24746 // CHECK12-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
24747 // CHECK12-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
24748 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
24749 // CHECK12-NEXT:    ret void
24750 //
24751 //
24752 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..22
24753 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
24754 // CHECK12-NEXT:  entry:
24755 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
24756 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
24757 // CHECK12-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 4
24758 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
24759 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
24760 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
24761 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
24762 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
24763 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
24764 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
24765 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
24766 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
24767 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
24768 // CHECK12-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
24769 // CHECK12-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
24770 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
24771 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
24772 // CHECK12-NEXT:    [[I4:%.*]] = alloca i32, align 4
24773 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
24774 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
24775 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
24776 // CHECK12-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 4
24777 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
24778 // CHECK12-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
24779 // CHECK12-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
24780 // CHECK12-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
24781 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
24782 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
24783 // CHECK12-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 4
24784 // CHECK12-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 4
24785 // CHECK12-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 4
24786 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
24787 // CHECK12-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
24788 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
24789 // CHECK12-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
24790 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24791 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
24792 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
24793 // CHECK12-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
24794 // CHECK12-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
24795 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
24796 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24797 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
24798 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
24799 // CHECK12:       omp.precond.then:
24800 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
24801 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
24802 // CHECK12-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
24803 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
24804 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
24805 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24806 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
24807 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
24808 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
24809 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
24810 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
24811 // CHECK12-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
24812 // CHECK12:       cond.true:
24813 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
24814 // CHECK12-NEXT:    br label [[COND_END:%.*]]
24815 // CHECK12:       cond.false:
24816 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
24817 // CHECK12-NEXT:    br label [[COND_END]]
24818 // CHECK12:       cond.end:
24819 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
24820 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
24821 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
24822 // CHECK12-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
24823 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
24824 // CHECK12:       omp.inner.for.cond:
24825 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !57
24826 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !57
24827 // CHECK12-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
24828 // CHECK12-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
24829 // CHECK12:       omp.inner.for.body:
24830 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !57
24831 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !57
24832 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !57
24833 // CHECK12-NEXT:    store i32 [[TMP21]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !57
24834 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !57
24835 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**, i32)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]], i32 [[TMP22]]), !llvm.access.group !57
24836 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
24837 // CHECK12:       omp.inner.for.inc:
24838 // CHECK12-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !57
24839 // CHECK12-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !57
24840 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
24841 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !57
24842 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP58:![0-9]+]]
24843 // CHECK12:       omp.inner.for.end:
24844 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
24845 // CHECK12:       omp.loop.exit:
24846 // CHECK12-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24847 // CHECK12-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
24848 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
24849 // CHECK12-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
24850 // CHECK12-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
24851 // CHECK12-NEXT:    br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
24852 // CHECK12:       .omp.final.then:
24853 // CHECK12-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24854 // CHECK12-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP29]], 0
24855 // CHECK12-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
24856 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
24857 // CHECK12-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
24858 // CHECK12-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
24859 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
24860 // CHECK12:       .omp.final.done:
24861 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
24862 // CHECK12:       omp.precond.end:
24863 // CHECK12-NEXT:    ret void
24864 //
24865 //
24866 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..23
24867 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], double** nonnull align 4 dereferenceable(4) [[A:%.*]], double** nonnull align 4 dereferenceable(4) [[B:%.*]], double** nonnull align 4 dereferenceable(4) [[C:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
24868 // CHECK12-NEXT:  entry:
24869 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
24870 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
24871 // CHECK12-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
24872 // CHECK12-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
24873 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
24874 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
24875 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
24876 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
24877 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
24878 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
24879 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
24880 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
24881 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
24882 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
24883 // CHECK12-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
24884 // CHECK12-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
24885 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
24886 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
24887 // CHECK12-NEXT:    [[I4:%.*]] = alloca i32, align 4
24888 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
24889 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
24890 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
24891 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
24892 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
24893 // CHECK12-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
24894 // CHECK12-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
24895 // CHECK12-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
24896 // CHECK12-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
24897 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
24898 // CHECK12-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
24899 // CHECK12-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
24900 // CHECK12-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
24901 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
24902 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
24903 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24904 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
24905 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
24906 // CHECK12-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
24907 // CHECK12-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
24908 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
24909 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24910 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
24911 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
24912 // CHECK12:       omp.precond.then:
24913 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
24914 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
24915 // CHECK12-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
24916 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
24917 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
24918 // CHECK12-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
24919 // CHECK12-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
24920 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
24921 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
24922 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
24923 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
24924 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
24925 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24926 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
24927 // CHECK12-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32 35, i32 [[TMP11]], i32 [[TMP12]], i32 1, i32 [[TMP10]])
24928 // CHECK12-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
24929 // CHECK12:       omp.dispatch.cond:
24930 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24931 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4
24932 // CHECK12-NEXT:    [[TMP17:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP16]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
24933 // CHECK12-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP17]], 0
24934 // CHECK12-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
24935 // CHECK12:       omp.dispatch.body:
24936 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
24937 // CHECK12-NEXT:    store i32 [[TMP18]], i32* [[DOTOMP_IV]], align 4
24938 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
24939 // CHECK12:       omp.inner.for.cond:
24940 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !60
24941 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !60
24942 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
24943 // CHECK12-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
24944 // CHECK12:       omp.inner.for.body:
24945 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !60
24946 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP21]], 1
24947 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
24948 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !60
24949 // CHECK12-NEXT:    [[TMP22:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !60
24950 // CHECK12-NEXT:    [[TMP23:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !60
24951 // CHECK12-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP22]], i32 [[TMP23]]
24952 // CHECK12-NEXT:    [[TMP24:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !60
24953 // CHECK12-NEXT:    [[TMP25:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !60
24954 // CHECK12-NEXT:    [[TMP26:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !60
24955 // CHECK12-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP25]], i32 [[TMP26]]
24956 // CHECK12-NEXT:    [[TMP27:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !60
24957 // CHECK12-NEXT:    [[ADD7:%.*]] = fadd double [[TMP24]], [[TMP27]]
24958 // CHECK12-NEXT:    [[TMP28:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !60
24959 // CHECK12-NEXT:    [[TMP29:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !60
24960 // CHECK12-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP28]], i32 [[TMP29]]
24961 // CHECK12-NEXT:    store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !60
24962 // CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
24963 // CHECK12:       omp.body.continue:
24964 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
24965 // CHECK12:       omp.inner.for.inc:
24966 // CHECK12-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !60
24967 // CHECK12-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP30]], 1
24968 // CHECK12-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !60
24969 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP61:![0-9]+]]
24970 // CHECK12:       omp.inner.for.end:
24971 // CHECK12-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
24972 // CHECK12:       omp.dispatch.inc:
24973 // CHECK12-NEXT:    br label [[OMP_DISPATCH_COND]]
24974 // CHECK12:       omp.dispatch.end:
24975 // CHECK12-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
24976 // CHECK12-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
24977 // CHECK12-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
24978 // CHECK12:       .omp.final.then:
24979 // CHECK12-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
24980 // CHECK12-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
24981 // CHECK12-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
24982 // CHECK12-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
24983 // CHECK12-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
24984 // CHECK12-NEXT:    store i32 [[ADD13]], i32* [[I4]], align 4
24985 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
24986 // CHECK12:       .omp.final.done:
24987 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
24988 // CHECK12:       omp.precond.end:
24989 // CHECK12-NEXT:    ret void
24990 //
24991 //
24992 // CHECK12-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
24993 // CHECK12-SAME: () #[[ATTR3:[0-9]+]] comdat {
24994 // CHECK12-NEXT:  entry:
24995 // CHECK12-NEXT:    [[A:%.*]] = alloca i32*, align 4
24996 // CHECK12-NEXT:    [[B:%.*]] = alloca i32*, align 4
24997 // CHECK12-NEXT:    [[C:%.*]] = alloca i32*, align 4
24998 // CHECK12-NEXT:    [[N:%.*]] = alloca i32, align 4
24999 // CHECK12-NEXT:    [[CH:%.*]] = alloca i32, align 4
25000 // CHECK12-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
25001 // CHECK12-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 4
25002 // CHECK12-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 4
25003 // CHECK12-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 4
25004 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
25005 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
25006 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
25007 // CHECK12-NEXT:    [[N_CASTED3:%.*]] = alloca i32, align 4
25008 // CHECK12-NEXT:    [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [4 x i8*], align 4
25009 // CHECK12-NEXT:    [[DOTOFFLOAD_PTRS5:%.*]] = alloca [4 x i8*], align 4
25010 // CHECK12-NEXT:    [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [4 x i8*], align 4
25011 // CHECK12-NEXT:    [[_TMP7:%.*]] = alloca i32, align 4
25012 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_8:%.*]] = alloca i32, align 4
25013 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4
25014 // CHECK12-NEXT:    [[CH_CASTED:%.*]] = alloca i32, align 4
25015 // CHECK12-NEXT:    [[N_CASTED16:%.*]] = alloca i32, align 4
25016 // CHECK12-NEXT:    [[DOTOFFLOAD_BASEPTRS17:%.*]] = alloca [5 x i8*], align 4
25017 // CHECK12-NEXT:    [[DOTOFFLOAD_PTRS18:%.*]] = alloca [5 x i8*], align 4
25018 // CHECK12-NEXT:    [[DOTOFFLOAD_MAPPERS19:%.*]] = alloca [5 x i8*], align 4
25019 // CHECK12-NEXT:    [[_TMP20:%.*]] = alloca i32, align 4
25020 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_21:%.*]] = alloca i32, align 4
25021 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_22:%.*]] = alloca i32, align 4
25022 // CHECK12-NEXT:    [[N_CASTED29:%.*]] = alloca i32, align 4
25023 // CHECK12-NEXT:    [[DOTOFFLOAD_BASEPTRS30:%.*]] = alloca [4 x i8*], align 4
25024 // CHECK12-NEXT:    [[DOTOFFLOAD_PTRS31:%.*]] = alloca [4 x i8*], align 4
25025 // CHECK12-NEXT:    [[DOTOFFLOAD_MAPPERS32:%.*]] = alloca [4 x i8*], align 4
25026 // CHECK12-NEXT:    [[_TMP33:%.*]] = alloca i32, align 4
25027 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_34:%.*]] = alloca i32, align 4
25028 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_35:%.*]] = alloca i32, align 4
25029 // CHECK12-NEXT:    [[CH_CASTED42:%.*]] = alloca i32, align 4
25030 // CHECK12-NEXT:    [[N_CASTED43:%.*]] = alloca i32, align 4
25031 // CHECK12-NEXT:    [[DOTOFFLOAD_BASEPTRS44:%.*]] = alloca [5 x i8*], align 4
25032 // CHECK12-NEXT:    [[DOTOFFLOAD_PTRS45:%.*]] = alloca [5 x i8*], align 4
25033 // CHECK12-NEXT:    [[DOTOFFLOAD_MAPPERS46:%.*]] = alloca [5 x i8*], align 4
25034 // CHECK12-NEXT:    [[_TMP47:%.*]] = alloca i32, align 4
25035 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_48:%.*]] = alloca i32, align 4
25036 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_49:%.*]] = alloca i32, align 4
25037 // CHECK12-NEXT:    [[N_CASTED56:%.*]] = alloca i32, align 4
25038 // CHECK12-NEXT:    [[DOTOFFLOAD_BASEPTRS57:%.*]] = alloca [4 x i8*], align 4
25039 // CHECK12-NEXT:    [[DOTOFFLOAD_PTRS58:%.*]] = alloca [4 x i8*], align 4
25040 // CHECK12-NEXT:    [[DOTOFFLOAD_MAPPERS59:%.*]] = alloca [4 x i8*], align 4
25041 // CHECK12-NEXT:    [[_TMP60:%.*]] = alloca i32, align 4
25042 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_61:%.*]] = alloca i32, align 4
25043 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_62:%.*]] = alloca i32, align 4
25044 // CHECK12-NEXT:    [[CH_CASTED69:%.*]] = alloca i32, align 4
25045 // CHECK12-NEXT:    [[N_CASTED70:%.*]] = alloca i32, align 4
25046 // CHECK12-NEXT:    [[DOTOFFLOAD_BASEPTRS71:%.*]] = alloca [5 x i8*], align 4
25047 // CHECK12-NEXT:    [[DOTOFFLOAD_PTRS72:%.*]] = alloca [5 x i8*], align 4
25048 // CHECK12-NEXT:    [[DOTOFFLOAD_MAPPERS73:%.*]] = alloca [5 x i8*], align 4
25049 // CHECK12-NEXT:    [[_TMP74:%.*]] = alloca i32, align 4
25050 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_75:%.*]] = alloca i32, align 4
25051 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_76:%.*]] = alloca i32, align 4
25052 // CHECK12-NEXT:    store i32 10000, i32* [[N]], align 4
25053 // CHECK12-NEXT:    store i32 100, i32* [[CH]], align 4
25054 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
25055 // CHECK12-NEXT:    store i32 [[TMP0]], i32* [[N_CASTED]], align 4
25056 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_CASTED]], align 4
25057 // CHECK12-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[A]], align 4
25058 // CHECK12-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[B]], align 4
25059 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[C]], align 4
25060 // CHECK12-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
25061 // CHECK12-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32*
25062 // CHECK12-NEXT:    store i32 [[TMP1]], i32* [[TMP6]], align 4
25063 // CHECK12-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
25064 // CHECK12-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32*
25065 // CHECK12-NEXT:    store i32 [[TMP1]], i32* [[TMP8]], align 4
25066 // CHECK12-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
25067 // CHECK12-NEXT:    store i8* null, i8** [[TMP9]], align 4
25068 // CHECK12-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
25069 // CHECK12-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i32**
25070 // CHECK12-NEXT:    store i32* [[TMP2]], i32** [[TMP11]], align 4
25071 // CHECK12-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
25072 // CHECK12-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32**
25073 // CHECK12-NEXT:    store i32* [[TMP2]], i32** [[TMP13]], align 4
25074 // CHECK12-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
25075 // CHECK12-NEXT:    store i8* null, i8** [[TMP14]], align 4
25076 // CHECK12-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
25077 // CHECK12-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32**
25078 // CHECK12-NEXT:    store i32* [[TMP3]], i32** [[TMP16]], align 4
25079 // CHECK12-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
25080 // CHECK12-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32**
25081 // CHECK12-NEXT:    store i32* [[TMP3]], i32** [[TMP18]], align 4
25082 // CHECK12-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
25083 // CHECK12-NEXT:    store i8* null, i8** [[TMP19]], align 4
25084 // CHECK12-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
25085 // CHECK12-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32**
25086 // CHECK12-NEXT:    store i32* [[TMP4]], i32** [[TMP21]], align 4
25087 // CHECK12-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
25088 // CHECK12-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32**
25089 // CHECK12-NEXT:    store i32* [[TMP4]], i32** [[TMP23]], align 4
25090 // CHECK12-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
25091 // CHECK12-NEXT:    store i8* null, i8** [[TMP24]], align 4
25092 // CHECK12-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
25093 // CHECK12-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
25094 // CHECK12-NEXT:    [[TMP27:%.*]] = load i32, i32* [[N]], align 4
25095 // CHECK12-NEXT:    store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4
25096 // CHECK12-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
25097 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0
25098 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
25099 // CHECK12-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
25100 // CHECK12-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
25101 // CHECK12-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
25102 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP29]], 1
25103 // CHECK12-NEXT:    [[TMP30:%.*]] = zext i32 [[ADD]] to i64
25104 // CHECK12-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP30]])
25105 // CHECK12-NEXT:    [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l42.region_id, i32 4, i8** [[TMP25]], i8** [[TMP26]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.28, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.29, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
25106 // CHECK12-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
25107 // CHECK12-NEXT:    br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
25108 // CHECK12:       omp_offload.failed:
25109 // CHECK12-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l42(i32 [[TMP1]], i32* [[TMP2]], i32* [[TMP3]], i32* [[TMP4]]) #[[ATTR2]]
25110 // CHECK12-NEXT:    br label [[OMP_OFFLOAD_CONT]]
25111 // CHECK12:       omp_offload.cont:
25112 // CHECK12-NEXT:    [[TMP33:%.*]] = load i32, i32* [[N]], align 4
25113 // CHECK12-NEXT:    store i32 [[TMP33]], i32* [[N_CASTED3]], align 4
25114 // CHECK12-NEXT:    [[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4
25115 // CHECK12-NEXT:    [[TMP35:%.*]] = load i32*, i32** [[A]], align 4
25116 // CHECK12-NEXT:    [[TMP36:%.*]] = load i32*, i32** [[B]], align 4
25117 // CHECK12-NEXT:    [[TMP37:%.*]] = load i32*, i32** [[C]], align 4
25118 // CHECK12-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
25119 // CHECK12-NEXT:    [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32*
25120 // CHECK12-NEXT:    store i32 [[TMP34]], i32* [[TMP39]], align 4
25121 // CHECK12-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
25122 // CHECK12-NEXT:    [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32*
25123 // CHECK12-NEXT:    store i32 [[TMP34]], i32* [[TMP41]], align 4
25124 // CHECK12-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0
25125 // CHECK12-NEXT:    store i8* null, i8** [[TMP42]], align 4
25126 // CHECK12-NEXT:    [[TMP43:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1
25127 // CHECK12-NEXT:    [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32**
25128 // CHECK12-NEXT:    store i32* [[TMP35]], i32** [[TMP44]], align 4
25129 // CHECK12-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1
25130 // CHECK12-NEXT:    [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32**
25131 // CHECK12-NEXT:    store i32* [[TMP35]], i32** [[TMP46]], align 4
25132 // CHECK12-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1
25133 // CHECK12-NEXT:    store i8* null, i8** [[TMP47]], align 4
25134 // CHECK12-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2
25135 // CHECK12-NEXT:    [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32**
25136 // CHECK12-NEXT:    store i32* [[TMP36]], i32** [[TMP49]], align 4
25137 // CHECK12-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2
25138 // CHECK12-NEXT:    [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32**
25139 // CHECK12-NEXT:    store i32* [[TMP36]], i32** [[TMP51]], align 4
25140 // CHECK12-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2
25141 // CHECK12-NEXT:    store i8* null, i8** [[TMP52]], align 4
25142 // CHECK12-NEXT:    [[TMP53:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 3
25143 // CHECK12-NEXT:    [[TMP54:%.*]] = bitcast i8** [[TMP53]] to i32**
25144 // CHECK12-NEXT:    store i32* [[TMP37]], i32** [[TMP54]], align 4
25145 // CHECK12-NEXT:    [[TMP55:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 3
25146 // CHECK12-NEXT:    [[TMP56:%.*]] = bitcast i8** [[TMP55]] to i32**
25147 // CHECK12-NEXT:    store i32* [[TMP37]], i32** [[TMP56]], align 4
25148 // CHECK12-NEXT:    [[TMP57:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 3
25149 // CHECK12-NEXT:    store i8* null, i8** [[TMP57]], align 4
25150 // CHECK12-NEXT:    [[TMP58:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
25151 // CHECK12-NEXT:    [[TMP59:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
25152 // CHECK12-NEXT:    [[TMP60:%.*]] = load i32, i32* [[N]], align 4
25153 // CHECK12-NEXT:    store i32 [[TMP60]], i32* [[DOTCAPTURE_EXPR_8]], align 4
25154 // CHECK12-NEXT:    [[TMP61:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_8]], align 4
25155 // CHECK12-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP61]], 0
25156 // CHECK12-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
25157 // CHECK12-NEXT:    [[SUB12:%.*]] = sub nsw i32 [[DIV11]], 1
25158 // CHECK12-NEXT:    store i32 [[SUB12]], i32* [[DOTCAPTURE_EXPR_9]], align 4
25159 // CHECK12-NEXT:    [[TMP62:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4
25160 // CHECK12-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP62]], 1
25161 // CHECK12-NEXT:    [[TMP63:%.*]] = zext i32 [[ADD13]] to i64
25162 // CHECK12-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP63]])
25163 // CHECK12-NEXT:    [[TMP64:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l50.region_id, i32 4, i8** [[TMP58]], i8** [[TMP59]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.32, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.33, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
25164 // CHECK12-NEXT:    [[TMP65:%.*]] = icmp ne i32 [[TMP64]], 0
25165 // CHECK12-NEXT:    br i1 [[TMP65]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]]
25166 // CHECK12:       omp_offload.failed14:
25167 // CHECK12-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l50(i32 [[TMP34]], i32* [[TMP35]], i32* [[TMP36]], i32* [[TMP37]]) #[[ATTR2]]
25168 // CHECK12-NEXT:    br label [[OMP_OFFLOAD_CONT15]]
25169 // CHECK12:       omp_offload.cont15:
25170 // CHECK12-NEXT:    [[TMP66:%.*]] = load i32, i32* [[CH]], align 4
25171 // CHECK12-NEXT:    store i32 [[TMP66]], i32* [[CH_CASTED]], align 4
25172 // CHECK12-NEXT:    [[TMP67:%.*]] = load i32, i32* [[CH_CASTED]], align 4
25173 // CHECK12-NEXT:    [[TMP68:%.*]] = load i32, i32* [[N]], align 4
25174 // CHECK12-NEXT:    store i32 [[TMP68]], i32* [[N_CASTED16]], align 4
25175 // CHECK12-NEXT:    [[TMP69:%.*]] = load i32, i32* [[N_CASTED16]], align 4
25176 // CHECK12-NEXT:    [[TMP70:%.*]] = load i32*, i32** [[A]], align 4
25177 // CHECK12-NEXT:    [[TMP71:%.*]] = load i32*, i32** [[B]], align 4
25178 // CHECK12-NEXT:    [[TMP72:%.*]] = load i32*, i32** [[C]], align 4
25179 // CHECK12-NEXT:    [[TMP73:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
25180 // CHECK12-NEXT:    [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32*
25181 // CHECK12-NEXT:    store i32 [[TMP67]], i32* [[TMP74]], align 4
25182 // CHECK12-NEXT:    [[TMP75:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
25183 // CHECK12-NEXT:    [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i32*
25184 // CHECK12-NEXT:    store i32 [[TMP67]], i32* [[TMP76]], align 4
25185 // CHECK12-NEXT:    [[TMP77:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 0
25186 // CHECK12-NEXT:    store i8* null, i8** [[TMP77]], align 4
25187 // CHECK12-NEXT:    [[TMP78:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 1
25188 // CHECK12-NEXT:    [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32*
25189 // CHECK12-NEXT:    store i32 [[TMP69]], i32* [[TMP79]], align 4
25190 // CHECK12-NEXT:    [[TMP80:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 1
25191 // CHECK12-NEXT:    [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i32*
25192 // CHECK12-NEXT:    store i32 [[TMP69]], i32* [[TMP81]], align 4
25193 // CHECK12-NEXT:    [[TMP82:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 1
25194 // CHECK12-NEXT:    store i8* null, i8** [[TMP82]], align 4
25195 // CHECK12-NEXT:    [[TMP83:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 2
25196 // CHECK12-NEXT:    [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32**
25197 // CHECK12-NEXT:    store i32* [[TMP70]], i32** [[TMP84]], align 4
25198 // CHECK12-NEXT:    [[TMP85:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 2
25199 // CHECK12-NEXT:    [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i32**
25200 // CHECK12-NEXT:    store i32* [[TMP70]], i32** [[TMP86]], align 4
25201 // CHECK12-NEXT:    [[TMP87:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 2
25202 // CHECK12-NEXT:    store i8* null, i8** [[TMP87]], align 4
25203 // CHECK12-NEXT:    [[TMP88:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 3
25204 // CHECK12-NEXT:    [[TMP89:%.*]] = bitcast i8** [[TMP88]] to i32**
25205 // CHECK12-NEXT:    store i32* [[TMP71]], i32** [[TMP89]], align 4
25206 // CHECK12-NEXT:    [[TMP90:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 3
25207 // CHECK12-NEXT:    [[TMP91:%.*]] = bitcast i8** [[TMP90]] to i32**
25208 // CHECK12-NEXT:    store i32* [[TMP71]], i32** [[TMP91]], align 4
25209 // CHECK12-NEXT:    [[TMP92:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 3
25210 // CHECK12-NEXT:    store i8* null, i8** [[TMP92]], align 4
25211 // CHECK12-NEXT:    [[TMP93:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 4
25212 // CHECK12-NEXT:    [[TMP94:%.*]] = bitcast i8** [[TMP93]] to i32**
25213 // CHECK12-NEXT:    store i32* [[TMP72]], i32** [[TMP94]], align 4
25214 // CHECK12-NEXT:    [[TMP95:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 4
25215 // CHECK12-NEXT:    [[TMP96:%.*]] = bitcast i8** [[TMP95]] to i32**
25216 // CHECK12-NEXT:    store i32* [[TMP72]], i32** [[TMP96]], align 4
25217 // CHECK12-NEXT:    [[TMP97:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 4
25218 // CHECK12-NEXT:    store i8* null, i8** [[TMP97]], align 4
25219 // CHECK12-NEXT:    [[TMP98:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
25220 // CHECK12-NEXT:    [[TMP99:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
25221 // CHECK12-NEXT:    [[TMP100:%.*]] = load i32, i32* [[N]], align 4
25222 // CHECK12-NEXT:    store i32 [[TMP100]], i32* [[DOTCAPTURE_EXPR_21]], align 4
25223 // CHECK12-NEXT:    [[TMP101:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_21]], align 4
25224 // CHECK12-NEXT:    [[SUB23:%.*]] = sub nsw i32 [[TMP101]], 0
25225 // CHECK12-NEXT:    [[DIV24:%.*]] = sdiv i32 [[SUB23]], 1
25226 // CHECK12-NEXT:    [[SUB25:%.*]] = sub nsw i32 [[DIV24]], 1
25227 // CHECK12-NEXT:    store i32 [[SUB25]], i32* [[DOTCAPTURE_EXPR_22]], align 4
25228 // CHECK12-NEXT:    [[TMP102:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_22]], align 4
25229 // CHECK12-NEXT:    [[ADD26:%.*]] = add nsw i32 [[TMP102]], 1
25230 // CHECK12-NEXT:    [[TMP103:%.*]] = zext i32 [[ADD26]] to i64
25231 // CHECK12-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP103]])
25232 // CHECK12-NEXT:    [[TMP104:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l58.region_id, i32 5, i8** [[TMP98]], i8** [[TMP99]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.36, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.37, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
25233 // CHECK12-NEXT:    [[TMP105:%.*]] = icmp ne i32 [[TMP104]], 0
25234 // CHECK12-NEXT:    br i1 [[TMP105]], label [[OMP_OFFLOAD_FAILED27:%.*]], label [[OMP_OFFLOAD_CONT28:%.*]]
25235 // CHECK12:       omp_offload.failed27:
25236 // CHECK12-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l58(i32 [[TMP67]], i32 [[TMP69]], i32* [[TMP70]], i32* [[TMP71]], i32* [[TMP72]]) #[[ATTR2]]
25237 // CHECK12-NEXT:    br label [[OMP_OFFLOAD_CONT28]]
25238 // CHECK12:       omp_offload.cont28:
25239 // CHECK12-NEXT:    [[TMP106:%.*]] = load i32, i32* [[N]], align 4
25240 // CHECK12-NEXT:    store i32 [[TMP106]], i32* [[N_CASTED29]], align 4
25241 // CHECK12-NEXT:    [[TMP107:%.*]] = load i32, i32* [[N_CASTED29]], align 4
25242 // CHECK12-NEXT:    [[TMP108:%.*]] = load i32*, i32** [[A]], align 4
25243 // CHECK12-NEXT:    [[TMP109:%.*]] = load i32*, i32** [[B]], align 4
25244 // CHECK12-NEXT:    [[TMP110:%.*]] = load i32*, i32** [[C]], align 4
25245 // CHECK12-NEXT:    [[TMP111:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 0
25246 // CHECK12-NEXT:    [[TMP112:%.*]] = bitcast i8** [[TMP111]] to i32*
25247 // CHECK12-NEXT:    store i32 [[TMP107]], i32* [[TMP112]], align 4
25248 // CHECK12-NEXT:    [[TMP113:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 0
25249 // CHECK12-NEXT:    [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i32*
25250 // CHECK12-NEXT:    store i32 [[TMP107]], i32* [[TMP114]], align 4
25251 // CHECK12-NEXT:    [[TMP115:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 0
25252 // CHECK12-NEXT:    store i8* null, i8** [[TMP115]], align 4
25253 // CHECK12-NEXT:    [[TMP116:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 1
25254 // CHECK12-NEXT:    [[TMP117:%.*]] = bitcast i8** [[TMP116]] to i32**
25255 // CHECK12-NEXT:    store i32* [[TMP108]], i32** [[TMP117]], align 4
25256 // CHECK12-NEXT:    [[TMP118:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 1
25257 // CHECK12-NEXT:    [[TMP119:%.*]] = bitcast i8** [[TMP118]] to i32**
25258 // CHECK12-NEXT:    store i32* [[TMP108]], i32** [[TMP119]], align 4
25259 // CHECK12-NEXT:    [[TMP120:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 1
25260 // CHECK12-NEXT:    store i8* null, i8** [[TMP120]], align 4
25261 // CHECK12-NEXT:    [[TMP121:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 2
25262 // CHECK12-NEXT:    [[TMP122:%.*]] = bitcast i8** [[TMP121]] to i32**
25263 // CHECK12-NEXT:    store i32* [[TMP109]], i32** [[TMP122]], align 4
25264 // CHECK12-NEXT:    [[TMP123:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 2
25265 // CHECK12-NEXT:    [[TMP124:%.*]] = bitcast i8** [[TMP123]] to i32**
25266 // CHECK12-NEXT:    store i32* [[TMP109]], i32** [[TMP124]], align 4
25267 // CHECK12-NEXT:    [[TMP125:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 2
25268 // CHECK12-NEXT:    store i8* null, i8** [[TMP125]], align 4
25269 // CHECK12-NEXT:    [[TMP126:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 3
25270 // CHECK12-NEXT:    [[TMP127:%.*]] = bitcast i8** [[TMP126]] to i32**
25271 // CHECK12-NEXT:    store i32* [[TMP110]], i32** [[TMP127]], align 4
25272 // CHECK12-NEXT:    [[TMP128:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 3
25273 // CHECK12-NEXT:    [[TMP129:%.*]] = bitcast i8** [[TMP128]] to i32**
25274 // CHECK12-NEXT:    store i32* [[TMP110]], i32** [[TMP129]], align 4
25275 // CHECK12-NEXT:    [[TMP130:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 3
25276 // CHECK12-NEXT:    store i8* null, i8** [[TMP130]], align 4
25277 // CHECK12-NEXT:    [[TMP131:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 0
25278 // CHECK12-NEXT:    [[TMP132:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 0
25279 // CHECK12-NEXT:    [[TMP133:%.*]] = load i32, i32* [[N]], align 4
25280 // CHECK12-NEXT:    store i32 [[TMP133]], i32* [[DOTCAPTURE_EXPR_34]], align 4
25281 // CHECK12-NEXT:    [[TMP134:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_34]], align 4
25282 // CHECK12-NEXT:    [[SUB36:%.*]] = sub nsw i32 [[TMP134]], 0
25283 // CHECK12-NEXT:    [[DIV37:%.*]] = sdiv i32 [[SUB36]], 1
25284 // CHECK12-NEXT:    [[SUB38:%.*]] = sub nsw i32 [[DIV37]], 1
25285 // CHECK12-NEXT:    store i32 [[SUB38]], i32* [[DOTCAPTURE_EXPR_35]], align 4
25286 // CHECK12-NEXT:    [[TMP135:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_35]], align 4
25287 // CHECK12-NEXT:    [[ADD39:%.*]] = add nsw i32 [[TMP135]], 1
25288 // CHECK12-NEXT:    [[TMP136:%.*]] = zext i32 [[ADD39]] to i64
25289 // CHECK12-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP136]])
25290 // CHECK12-NEXT:    [[TMP137:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l66.region_id, i32 4, i8** [[TMP131]], i8** [[TMP132]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.40, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.41, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
25291 // CHECK12-NEXT:    [[TMP138:%.*]] = icmp ne i32 [[TMP137]], 0
25292 // CHECK12-NEXT:    br i1 [[TMP138]], label [[OMP_OFFLOAD_FAILED40:%.*]], label [[OMP_OFFLOAD_CONT41:%.*]]
25293 // CHECK12:       omp_offload.failed40:
25294 // CHECK12-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l66(i32 [[TMP107]], i32* [[TMP108]], i32* [[TMP109]], i32* [[TMP110]]) #[[ATTR2]]
25295 // CHECK12-NEXT:    br label [[OMP_OFFLOAD_CONT41]]
25296 // CHECK12:       omp_offload.cont41:
25297 // CHECK12-NEXT:    [[TMP139:%.*]] = load i32, i32* [[CH]], align 4
25298 // CHECK12-NEXT:    store i32 [[TMP139]], i32* [[CH_CASTED42]], align 4
25299 // CHECK12-NEXT:    [[TMP140:%.*]] = load i32, i32* [[CH_CASTED42]], align 4
25300 // CHECK12-NEXT:    [[TMP141:%.*]] = load i32, i32* [[N]], align 4
25301 // CHECK12-NEXT:    store i32 [[TMP141]], i32* [[N_CASTED43]], align 4
25302 // CHECK12-NEXT:    [[TMP142:%.*]] = load i32, i32* [[N_CASTED43]], align 4
25303 // CHECK12-NEXT:    [[TMP143:%.*]] = load i32*, i32** [[A]], align 4
25304 // CHECK12-NEXT:    [[TMP144:%.*]] = load i32*, i32** [[B]], align 4
25305 // CHECK12-NEXT:    [[TMP145:%.*]] = load i32*, i32** [[C]], align 4
25306 // CHECK12-NEXT:    [[TMP146:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 0
25307 // CHECK12-NEXT:    [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i32*
25308 // CHECK12-NEXT:    store i32 [[TMP140]], i32* [[TMP147]], align 4
25309 // CHECK12-NEXT:    [[TMP148:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 0
25310 // CHECK12-NEXT:    [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i32*
25311 // CHECK12-NEXT:    store i32 [[TMP140]], i32* [[TMP149]], align 4
25312 // CHECK12-NEXT:    [[TMP150:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 0
25313 // CHECK12-NEXT:    store i8* null, i8** [[TMP150]], align 4
25314 // CHECK12-NEXT:    [[TMP151:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 1
25315 // CHECK12-NEXT:    [[TMP152:%.*]] = bitcast i8** [[TMP151]] to i32*
25316 // CHECK12-NEXT:    store i32 [[TMP142]], i32* [[TMP152]], align 4
25317 // CHECK12-NEXT:    [[TMP153:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 1
25318 // CHECK12-NEXT:    [[TMP154:%.*]] = bitcast i8** [[TMP153]] to i32*
25319 // CHECK12-NEXT:    store i32 [[TMP142]], i32* [[TMP154]], align 4
25320 // CHECK12-NEXT:    [[TMP155:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 1
25321 // CHECK12-NEXT:    store i8* null, i8** [[TMP155]], align 4
25322 // CHECK12-NEXT:    [[TMP156:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 2
25323 // CHECK12-NEXT:    [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i32**
25324 // CHECK12-NEXT:    store i32* [[TMP143]], i32** [[TMP157]], align 4
25325 // CHECK12-NEXT:    [[TMP158:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 2
25326 // CHECK12-NEXT:    [[TMP159:%.*]] = bitcast i8** [[TMP158]] to i32**
25327 // CHECK12-NEXT:    store i32* [[TMP143]], i32** [[TMP159]], align 4
25328 // CHECK12-NEXT:    [[TMP160:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 2
25329 // CHECK12-NEXT:    store i8* null, i8** [[TMP160]], align 4
25330 // CHECK12-NEXT:    [[TMP161:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 3
25331 // CHECK12-NEXT:    [[TMP162:%.*]] = bitcast i8** [[TMP161]] to i32**
25332 // CHECK12-NEXT:    store i32* [[TMP144]], i32** [[TMP162]], align 4
25333 // CHECK12-NEXT:    [[TMP163:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 3
25334 // CHECK12-NEXT:    [[TMP164:%.*]] = bitcast i8** [[TMP163]] to i32**
25335 // CHECK12-NEXT:    store i32* [[TMP144]], i32** [[TMP164]], align 4
25336 // CHECK12-NEXT:    [[TMP165:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 3
25337 // CHECK12-NEXT:    store i8* null, i8** [[TMP165]], align 4
25338 // CHECK12-NEXT:    [[TMP166:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 4
25339 // CHECK12-NEXT:    [[TMP167:%.*]] = bitcast i8** [[TMP166]] to i32**
25340 // CHECK12-NEXT:    store i32* [[TMP145]], i32** [[TMP167]], align 4
25341 // CHECK12-NEXT:    [[TMP168:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 4
25342 // CHECK12-NEXT:    [[TMP169:%.*]] = bitcast i8** [[TMP168]] to i32**
25343 // CHECK12-NEXT:    store i32* [[TMP145]], i32** [[TMP169]], align 4
25344 // CHECK12-NEXT:    [[TMP170:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 4
25345 // CHECK12-NEXT:    store i8* null, i8** [[TMP170]], align 4
25346 // CHECK12-NEXT:    [[TMP171:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 0
25347 // CHECK12-NEXT:    [[TMP172:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 0
25348 // CHECK12-NEXT:    [[TMP173:%.*]] = load i32, i32* [[N]], align 4
25349 // CHECK12-NEXT:    store i32 [[TMP173]], i32* [[DOTCAPTURE_EXPR_48]], align 4
25350 // CHECK12-NEXT:    [[TMP174:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_48]], align 4
25351 // CHECK12-NEXT:    [[SUB50:%.*]] = sub nsw i32 [[TMP174]], 0
25352 // CHECK12-NEXT:    [[DIV51:%.*]] = sdiv i32 [[SUB50]], 1
25353 // CHECK12-NEXT:    [[SUB52:%.*]] = sub nsw i32 [[DIV51]], 1
25354 // CHECK12-NEXT:    store i32 [[SUB52]], i32* [[DOTCAPTURE_EXPR_49]], align 4
25355 // CHECK12-NEXT:    [[TMP175:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_49]], align 4
25356 // CHECK12-NEXT:    [[ADD53:%.*]] = add nsw i32 [[TMP175]], 1
25357 // CHECK12-NEXT:    [[TMP176:%.*]] = zext i32 [[ADD53]] to i64
25358 // CHECK12-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP176]])
25359 // CHECK12-NEXT:    [[TMP177:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l74.region_id, i32 5, i8** [[TMP171]], i8** [[TMP172]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.44, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.45, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
25360 // CHECK12-NEXT:    [[TMP178:%.*]] = icmp ne i32 [[TMP177]], 0
25361 // CHECK12-NEXT:    br i1 [[TMP178]], label [[OMP_OFFLOAD_FAILED54:%.*]], label [[OMP_OFFLOAD_CONT55:%.*]]
25362 // CHECK12:       omp_offload.failed54:
25363 // CHECK12-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l74(i32 [[TMP140]], i32 [[TMP142]], i32* [[TMP143]], i32* [[TMP144]], i32* [[TMP145]]) #[[ATTR2]]
25364 // CHECK12-NEXT:    br label [[OMP_OFFLOAD_CONT55]]
25365 // CHECK12:       omp_offload.cont55:
25366 // CHECK12-NEXT:    [[TMP179:%.*]] = load i32, i32* [[N]], align 4
25367 // CHECK12-NEXT:    store i32 [[TMP179]], i32* [[N_CASTED56]], align 4
25368 // CHECK12-NEXT:    [[TMP180:%.*]] = load i32, i32* [[N_CASTED56]], align 4
25369 // CHECK12-NEXT:    [[TMP181:%.*]] = load i32*, i32** [[A]], align 4
25370 // CHECK12-NEXT:    [[TMP182:%.*]] = load i32*, i32** [[B]], align 4
25371 // CHECK12-NEXT:    [[TMP183:%.*]] = load i32*, i32** [[C]], align 4
25372 // CHECK12-NEXT:    [[TMP184:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 0
25373 // CHECK12-NEXT:    [[TMP185:%.*]] = bitcast i8** [[TMP184]] to i32*
25374 // CHECK12-NEXT:    store i32 [[TMP180]], i32* [[TMP185]], align 4
25375 // CHECK12-NEXT:    [[TMP186:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS58]], i32 0, i32 0
25376 // CHECK12-NEXT:    [[TMP187:%.*]] = bitcast i8** [[TMP186]] to i32*
25377 // CHECK12-NEXT:    store i32 [[TMP180]], i32* [[TMP187]], align 4
25378 // CHECK12-NEXT:    [[TMP188:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS59]], i32 0, i32 0
25379 // CHECK12-NEXT:    store i8* null, i8** [[TMP188]], align 4
25380 // CHECK12-NEXT:    [[TMP189:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 1
25381 // CHECK12-NEXT:    [[TMP190:%.*]] = bitcast i8** [[TMP189]] to i32**
25382 // CHECK12-NEXT:    store i32* [[TMP181]], i32** [[TMP190]], align 4
25383 // CHECK12-NEXT:    [[TMP191:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS58]], i32 0, i32 1
25384 // CHECK12-NEXT:    [[TMP192:%.*]] = bitcast i8** [[TMP191]] to i32**
25385 // CHECK12-NEXT:    store i32* [[TMP181]], i32** [[TMP192]], align 4
25386 // CHECK12-NEXT:    [[TMP193:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS59]], i32 0, i32 1
25387 // CHECK12-NEXT:    store i8* null, i8** [[TMP193]], align 4
25388 // CHECK12-NEXT:    [[TMP194:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 2
25389 // CHECK12-NEXT:    [[TMP195:%.*]] = bitcast i8** [[TMP194]] to i32**
25390 // CHECK12-NEXT:    store i32* [[TMP182]], i32** [[TMP195]], align 4
25391 // CHECK12-NEXT:    [[TMP196:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS58]], i32 0, i32 2
25392 // CHECK12-NEXT:    [[TMP197:%.*]] = bitcast i8** [[TMP196]] to i32**
25393 // CHECK12-NEXT:    store i32* [[TMP182]], i32** [[TMP197]], align 4
25394 // CHECK12-NEXT:    [[TMP198:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS59]], i32 0, i32 2
25395 // CHECK12-NEXT:    store i8* null, i8** [[TMP198]], align 4
25396 // CHECK12-NEXT:    [[TMP199:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 3
25397 // CHECK12-NEXT:    [[TMP200:%.*]] = bitcast i8** [[TMP199]] to i32**
25398 // CHECK12-NEXT:    store i32* [[TMP183]], i32** [[TMP200]], align 4
25399 // CHECK12-NEXT:    [[TMP201:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS58]], i32 0, i32 3
25400 // CHECK12-NEXT:    [[TMP202:%.*]] = bitcast i8** [[TMP201]] to i32**
25401 // CHECK12-NEXT:    store i32* [[TMP183]], i32** [[TMP202]], align 4
25402 // CHECK12-NEXT:    [[TMP203:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS59]], i32 0, i32 3
25403 // CHECK12-NEXT:    store i8* null, i8** [[TMP203]], align 4
25404 // CHECK12-NEXT:    [[TMP204:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 0
25405 // CHECK12-NEXT:    [[TMP205:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS58]], i32 0, i32 0
25406 // CHECK12-NEXT:    [[TMP206:%.*]] = load i32, i32* [[N]], align 4
25407 // CHECK12-NEXT:    store i32 [[TMP206]], i32* [[DOTCAPTURE_EXPR_61]], align 4
25408 // CHECK12-NEXT:    [[TMP207:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_61]], align 4
25409 // CHECK12-NEXT:    [[SUB63:%.*]] = sub nsw i32 [[TMP207]], 0
25410 // CHECK12-NEXT:    [[DIV64:%.*]] = sdiv i32 [[SUB63]], 1
25411 // CHECK12-NEXT:    [[SUB65:%.*]] = sub nsw i32 [[DIV64]], 1
25412 // CHECK12-NEXT:    store i32 [[SUB65]], i32* [[DOTCAPTURE_EXPR_62]], align 4
25413 // CHECK12-NEXT:    [[TMP208:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_62]], align 4
25414 // CHECK12-NEXT:    [[ADD66:%.*]] = add nsw i32 [[TMP208]], 1
25415 // CHECK12-NEXT:    [[TMP209:%.*]] = zext i32 [[ADD66]] to i64
25416 // CHECK12-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP209]])
25417 // CHECK12-NEXT:    [[TMP210:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l82.region_id, i32 4, i8** [[TMP204]], i8** [[TMP205]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.48, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.49, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
25418 // CHECK12-NEXT:    [[TMP211:%.*]] = icmp ne i32 [[TMP210]], 0
25419 // CHECK12-NEXT:    br i1 [[TMP211]], label [[OMP_OFFLOAD_FAILED67:%.*]], label [[OMP_OFFLOAD_CONT68:%.*]]
25420 // CHECK12:       omp_offload.failed67:
25421 // CHECK12-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l82(i32 [[TMP180]], i32* [[TMP181]], i32* [[TMP182]], i32* [[TMP183]]) #[[ATTR2]]
25422 // CHECK12-NEXT:    br label [[OMP_OFFLOAD_CONT68]]
25423 // CHECK12:       omp_offload.cont68:
25424 // CHECK12-NEXT:    [[TMP212:%.*]] = load i32, i32* [[CH]], align 4
25425 // CHECK12-NEXT:    store i32 [[TMP212]], i32* [[CH_CASTED69]], align 4
25426 // CHECK12-NEXT:    [[TMP213:%.*]] = load i32, i32* [[CH_CASTED69]], align 4
25427 // CHECK12-NEXT:    [[TMP214:%.*]] = load i32, i32* [[N]], align 4
25428 // CHECK12-NEXT:    store i32 [[TMP214]], i32* [[N_CASTED70]], align 4
25429 // CHECK12-NEXT:    [[TMP215:%.*]] = load i32, i32* [[N_CASTED70]], align 4
25430 // CHECK12-NEXT:    [[TMP216:%.*]] = load i32*, i32** [[A]], align 4
25431 // CHECK12-NEXT:    [[TMP217:%.*]] = load i32*, i32** [[B]], align 4
25432 // CHECK12-NEXT:    [[TMP218:%.*]] = load i32*, i32** [[C]], align 4
25433 // CHECK12-NEXT:    [[TMP219:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 0
25434 // CHECK12-NEXT:    [[TMP220:%.*]] = bitcast i8** [[TMP219]] to i32*
25435 // CHECK12-NEXT:    store i32 [[TMP213]], i32* [[TMP220]], align 4
25436 // CHECK12-NEXT:    [[TMP221:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 0
25437 // CHECK12-NEXT:    [[TMP222:%.*]] = bitcast i8** [[TMP221]] to i32*
25438 // CHECK12-NEXT:    store i32 [[TMP213]], i32* [[TMP222]], align 4
25439 // CHECK12-NEXT:    [[TMP223:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS73]], i32 0, i32 0
25440 // CHECK12-NEXT:    store i8* null, i8** [[TMP223]], align 4
25441 // CHECK12-NEXT:    [[TMP224:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 1
25442 // CHECK12-NEXT:    [[TMP225:%.*]] = bitcast i8** [[TMP224]] to i32*
25443 // CHECK12-NEXT:    store i32 [[TMP215]], i32* [[TMP225]], align 4
25444 // CHECK12-NEXT:    [[TMP226:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 1
25445 // CHECK12-NEXT:    [[TMP227:%.*]] = bitcast i8** [[TMP226]] to i32*
25446 // CHECK12-NEXT:    store i32 [[TMP215]], i32* [[TMP227]], align 4
25447 // CHECK12-NEXT:    [[TMP228:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS73]], i32 0, i32 1
25448 // CHECK12-NEXT:    store i8* null, i8** [[TMP228]], align 4
25449 // CHECK12-NEXT:    [[TMP229:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 2
25450 // CHECK12-NEXT:    [[TMP230:%.*]] = bitcast i8** [[TMP229]] to i32**
25451 // CHECK12-NEXT:    store i32* [[TMP216]], i32** [[TMP230]], align 4
25452 // CHECK12-NEXT:    [[TMP231:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 2
25453 // CHECK12-NEXT:    [[TMP232:%.*]] = bitcast i8** [[TMP231]] to i32**
25454 // CHECK12-NEXT:    store i32* [[TMP216]], i32** [[TMP232]], align 4
25455 // CHECK12-NEXT:    [[TMP233:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS73]], i32 0, i32 2
25456 // CHECK12-NEXT:    store i8* null, i8** [[TMP233]], align 4
25457 // CHECK12-NEXT:    [[TMP234:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 3
25458 // CHECK12-NEXT:    [[TMP235:%.*]] = bitcast i8** [[TMP234]] to i32**
25459 // CHECK12-NEXT:    store i32* [[TMP217]], i32** [[TMP235]], align 4
25460 // CHECK12-NEXT:    [[TMP236:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 3
25461 // CHECK12-NEXT:    [[TMP237:%.*]] = bitcast i8** [[TMP236]] to i32**
25462 // CHECK12-NEXT:    store i32* [[TMP217]], i32** [[TMP237]], align 4
25463 // CHECK12-NEXT:    [[TMP238:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS73]], i32 0, i32 3
25464 // CHECK12-NEXT:    store i8* null, i8** [[TMP238]], align 4
25465 // CHECK12-NEXT:    [[TMP239:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 4
25466 // CHECK12-NEXT:    [[TMP240:%.*]] = bitcast i8** [[TMP239]] to i32**
25467 // CHECK12-NEXT:    store i32* [[TMP218]], i32** [[TMP240]], align 4
25468 // CHECK12-NEXT:    [[TMP241:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 4
25469 // CHECK12-NEXT:    [[TMP242:%.*]] = bitcast i8** [[TMP241]] to i32**
25470 // CHECK12-NEXT:    store i32* [[TMP218]], i32** [[TMP242]], align 4
25471 // CHECK12-NEXT:    [[TMP243:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS73]], i32 0, i32 4
25472 // CHECK12-NEXT:    store i8* null, i8** [[TMP243]], align 4
25473 // CHECK12-NEXT:    [[TMP244:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 0
25474 // CHECK12-NEXT:    [[TMP245:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 0
25475 // CHECK12-NEXT:    [[TMP246:%.*]] = load i32, i32* [[N]], align 4
25476 // CHECK12-NEXT:    store i32 [[TMP246]], i32* [[DOTCAPTURE_EXPR_75]], align 4
25477 // CHECK12-NEXT:    [[TMP247:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_75]], align 4
25478 // CHECK12-NEXT:    [[SUB77:%.*]] = sub nsw i32 [[TMP247]], 0
25479 // CHECK12-NEXT:    [[DIV78:%.*]] = sdiv i32 [[SUB77]], 1
25480 // CHECK12-NEXT:    [[SUB79:%.*]] = sub nsw i32 [[DIV78]], 1
25481 // CHECK12-NEXT:    store i32 [[SUB79]], i32* [[DOTCAPTURE_EXPR_76]], align 4
25482 // CHECK12-NEXT:    [[TMP248:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_76]], align 4
25483 // CHECK12-NEXT:    [[ADD80:%.*]] = add nsw i32 [[TMP248]], 1
25484 // CHECK12-NEXT:    [[TMP249:%.*]] = zext i32 [[ADD80]] to i64
25485 // CHECK12-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP249]])
25486 // CHECK12-NEXT:    [[TMP250:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l90.region_id, i32 5, i8** [[TMP244]], i8** [[TMP245]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.52, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.53, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
25487 // CHECK12-NEXT:    [[TMP251:%.*]] = icmp ne i32 [[TMP250]], 0
25488 // CHECK12-NEXT:    br i1 [[TMP251]], label [[OMP_OFFLOAD_FAILED81:%.*]], label [[OMP_OFFLOAD_CONT82:%.*]]
25489 // CHECK12:       omp_offload.failed81:
25490 // CHECK12-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l90(i32 [[TMP213]], i32 [[TMP215]], i32* [[TMP216]], i32* [[TMP217]], i32* [[TMP218]]) #[[ATTR2]]
25491 // CHECK12-NEXT:    br label [[OMP_OFFLOAD_CONT82]]
25492 // CHECK12:       omp_offload.cont82:
25493 // CHECK12-NEXT:    ret i32 0
25494 //
25495 //
25496 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l42
25497 // CHECK12-SAME: (i32 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
25498 // CHECK12-NEXT:  entry:
25499 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
25500 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
25501 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 4
25502 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 4
25503 // CHECK12-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
25504 // CHECK12-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
25505 // CHECK12-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 4
25506 // CHECK12-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 4
25507 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
25508 // CHECK12-NEXT:    ret void
25509 //
25510 //
25511 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..26
25512 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
25513 // CHECK12-NEXT:  entry:
25514 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
25515 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
25516 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
25517 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
25518 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
25519 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
25520 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
25521 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
25522 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
25523 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
25524 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
25525 // CHECK12-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
25526 // CHECK12-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
25527 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
25528 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
25529 // CHECK12-NEXT:    [[I3:%.*]] = alloca i32, align 4
25530 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
25531 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
25532 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
25533 // CHECK12-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
25534 // CHECK12-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
25535 // CHECK12-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
25536 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
25537 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
25538 // CHECK12-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
25539 // CHECK12-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
25540 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
25541 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
25542 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
25543 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
25544 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
25545 // CHECK12-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
25546 // CHECK12-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
25547 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
25548 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
25549 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
25550 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
25551 // CHECK12:       omp.precond.then:
25552 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
25553 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
25554 // CHECK12-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
25555 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
25556 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
25557 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
25558 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
25559 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
25560 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
25561 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
25562 // CHECK12-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
25563 // CHECK12-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
25564 // CHECK12:       cond.true:
25565 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
25566 // CHECK12-NEXT:    br label [[COND_END:%.*]]
25567 // CHECK12:       cond.false:
25568 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
25569 // CHECK12-NEXT:    br label [[COND_END]]
25570 // CHECK12:       cond.end:
25571 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
25572 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
25573 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
25574 // CHECK12-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
25575 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
25576 // CHECK12:       omp.inner.for.cond:
25577 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !63
25578 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !63
25579 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
25580 // CHECK12-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
25581 // CHECK12:       omp.inner.for.body:
25582 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !63
25583 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !63
25584 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32**, i32**, i32**)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], i32** [[TMP1]], i32** [[TMP2]], i32** [[TMP3]]), !llvm.access.group !63
25585 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
25586 // CHECK12:       omp.inner.for.inc:
25587 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !63
25588 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !63
25589 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
25590 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !63
25591 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP64:![0-9]+]]
25592 // CHECK12:       omp.inner.for.end:
25593 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
25594 // CHECK12:       omp.loop.exit:
25595 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
25596 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
25597 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
25598 // CHECK12-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
25599 // CHECK12-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
25600 // CHECK12-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
25601 // CHECK12:       .omp.final.then:
25602 // CHECK12-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
25603 // CHECK12-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
25604 // CHECK12-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
25605 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
25606 // CHECK12-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
25607 // CHECK12-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
25608 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
25609 // CHECK12:       .omp.final.done:
25610 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
25611 // CHECK12:       omp.precond.end:
25612 // CHECK12-NEXT:    ret void
25613 //
25614 //
25615 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..27
25616 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
25617 // CHECK12-NEXT:  entry:
25618 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
25619 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
25620 // CHECK12-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
25621 // CHECK12-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
25622 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
25623 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
25624 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
25625 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
25626 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
25627 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
25628 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
25629 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
25630 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
25631 // CHECK12-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
25632 // CHECK12-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
25633 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
25634 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
25635 // CHECK12-NEXT:    [[I3:%.*]] = alloca i32, align 4
25636 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
25637 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
25638 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
25639 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
25640 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
25641 // CHECK12-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
25642 // CHECK12-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
25643 // CHECK12-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
25644 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
25645 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
25646 // CHECK12-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
25647 // CHECK12-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
25648 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
25649 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
25650 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
25651 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
25652 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
25653 // CHECK12-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
25654 // CHECK12-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
25655 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
25656 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
25657 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
25658 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
25659 // CHECK12:       omp.precond.then:
25660 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
25661 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
25662 // CHECK12-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
25663 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
25664 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
25665 // CHECK12-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
25666 // CHECK12-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
25667 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
25668 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
25669 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
25670 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
25671 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
25672 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
25673 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
25674 // CHECK12-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
25675 // CHECK12-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
25676 // CHECK12:       cond.true:
25677 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
25678 // CHECK12-NEXT:    br label [[COND_END:%.*]]
25679 // CHECK12:       cond.false:
25680 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
25681 // CHECK12-NEXT:    br label [[COND_END]]
25682 // CHECK12:       cond.end:
25683 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
25684 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
25685 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
25686 // CHECK12-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
25687 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
25688 // CHECK12:       omp.inner.for.cond:
25689 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !66
25690 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !66
25691 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
25692 // CHECK12-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
25693 // CHECK12:       omp.inner.for.body:
25694 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !66
25695 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
25696 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
25697 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !66
25698 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[TMP2]], align 4, !llvm.access.group !66
25699 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !66
25700 // CHECK12-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i32 [[TMP21]]
25701 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !66
25702 // CHECK12-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[TMP3]], align 4, !llvm.access.group !66
25703 // CHECK12-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !66
25704 // CHECK12-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i32 [[TMP24]]
25705 // CHECK12-NEXT:    [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4, !llvm.access.group !66
25706 // CHECK12-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP22]], [[TMP25]]
25707 // CHECK12-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP1]], align 4, !llvm.access.group !66
25708 // CHECK12-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !66
25709 // CHECK12-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i32 [[TMP27]]
25710 // CHECK12-NEXT:    store i32 [[ADD7]], i32* [[ARRAYIDX8]], align 4, !llvm.access.group !66
25711 // CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
25712 // CHECK12:       omp.body.continue:
25713 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
25714 // CHECK12:       omp.inner.for.inc:
25715 // CHECK12-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !66
25716 // CHECK12-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP28]], 1
25717 // CHECK12-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !66
25718 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP67:![0-9]+]]
25719 // CHECK12:       omp.inner.for.end:
25720 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
25721 // CHECK12:       omp.loop.exit:
25722 // CHECK12-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
25723 // CHECK12-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
25724 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
25725 // CHECK12-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
25726 // CHECK12-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
25727 // CHECK12-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
25728 // CHECK12:       .omp.final.then:
25729 // CHECK12-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
25730 // CHECK12-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
25731 // CHECK12-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
25732 // CHECK12-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
25733 // CHECK12-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
25734 // CHECK12-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
25735 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
25736 // CHECK12:       .omp.final.done:
25737 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
25738 // CHECK12:       omp.precond.end:
25739 // CHECK12-NEXT:    ret void
25740 //
25741 //
25742 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l50
25743 // CHECK12-SAME: (i32 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
25744 // CHECK12-NEXT:  entry:
25745 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
25746 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
25747 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 4
25748 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 4
25749 // CHECK12-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
25750 // CHECK12-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
25751 // CHECK12-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 4
25752 // CHECK12-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 4
25753 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
25754 // CHECK12-NEXT:    ret void
25755 //
25756 //
25757 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..30
25758 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
25759 // CHECK12-NEXT:  entry:
25760 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
25761 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
25762 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
25763 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
25764 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
25765 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
25766 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
25767 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
25768 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
25769 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
25770 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
25771 // CHECK12-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
25772 // CHECK12-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
25773 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
25774 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
25775 // CHECK12-NEXT:    [[I3:%.*]] = alloca i32, align 4
25776 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
25777 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
25778 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
25779 // CHECK12-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
25780 // CHECK12-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
25781 // CHECK12-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
25782 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
25783 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
25784 // CHECK12-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
25785 // CHECK12-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
25786 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
25787 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
25788 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
25789 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
25790 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
25791 // CHECK12-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
25792 // CHECK12-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
25793 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
25794 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
25795 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
25796 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
25797 // CHECK12:       omp.precond.then:
25798 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
25799 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
25800 // CHECK12-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
25801 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
25802 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
25803 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
25804 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
25805 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
25806 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
25807 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
25808 // CHECK12-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
25809 // CHECK12-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
25810 // CHECK12:       cond.true:
25811 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
25812 // CHECK12-NEXT:    br label [[COND_END:%.*]]
25813 // CHECK12:       cond.false:
25814 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
25815 // CHECK12-NEXT:    br label [[COND_END]]
25816 // CHECK12:       cond.end:
25817 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
25818 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
25819 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
25820 // CHECK12-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
25821 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
25822 // CHECK12:       omp.inner.for.cond:
25823 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !69
25824 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !69
25825 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
25826 // CHECK12-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
25827 // CHECK12:       omp.inner.for.body:
25828 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !69
25829 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !69
25830 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32**, i32**, i32**)* @.omp_outlined..31 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], i32** [[TMP1]], i32** [[TMP2]], i32** [[TMP3]]), !llvm.access.group !69
25831 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
25832 // CHECK12:       omp.inner.for.inc:
25833 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !69
25834 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !69
25835 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
25836 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !69
25837 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP70:![0-9]+]]
25838 // CHECK12:       omp.inner.for.end:
25839 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
25840 // CHECK12:       omp.loop.exit:
25841 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
25842 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
25843 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
25844 // CHECK12-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
25845 // CHECK12-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
25846 // CHECK12-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
25847 // CHECK12:       .omp.final.then:
25848 // CHECK12-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
25849 // CHECK12-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
25850 // CHECK12-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
25851 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
25852 // CHECK12-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
25853 // CHECK12-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
25854 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
25855 // CHECK12:       .omp.final.done:
25856 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
25857 // CHECK12:       omp.precond.end:
25858 // CHECK12-NEXT:    ret void
25859 //
25860 //
25861 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..31
25862 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
25863 // CHECK12-NEXT:  entry:
25864 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
25865 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
25866 // CHECK12-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
25867 // CHECK12-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
25868 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
25869 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
25870 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
25871 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
25872 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
25873 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
25874 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
25875 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
25876 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
25877 // CHECK12-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
25878 // CHECK12-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
25879 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
25880 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
25881 // CHECK12-NEXT:    [[I3:%.*]] = alloca i32, align 4
25882 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
25883 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
25884 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
25885 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
25886 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
25887 // CHECK12-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
25888 // CHECK12-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
25889 // CHECK12-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
25890 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
25891 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
25892 // CHECK12-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
25893 // CHECK12-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
25894 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
25895 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
25896 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
25897 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
25898 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
25899 // CHECK12-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
25900 // CHECK12-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
25901 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
25902 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
25903 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
25904 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
25905 // CHECK12:       omp.precond.then:
25906 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
25907 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
25908 // CHECK12-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
25909 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
25910 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
25911 // CHECK12-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
25912 // CHECK12-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
25913 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
25914 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
25915 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
25916 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
25917 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
25918 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
25919 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
25920 // CHECK12-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
25921 // CHECK12-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
25922 // CHECK12:       cond.true:
25923 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
25924 // CHECK12-NEXT:    br label [[COND_END:%.*]]
25925 // CHECK12:       cond.false:
25926 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
25927 // CHECK12-NEXT:    br label [[COND_END]]
25928 // CHECK12:       cond.end:
25929 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
25930 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
25931 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
25932 // CHECK12-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
25933 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
25934 // CHECK12:       omp.inner.for.cond:
25935 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !72
25936 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !72
25937 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
25938 // CHECK12-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
25939 // CHECK12:       omp.inner.for.body:
25940 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !72
25941 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
25942 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
25943 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !72
25944 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[TMP2]], align 4, !llvm.access.group !72
25945 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !72
25946 // CHECK12-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i32 [[TMP21]]
25947 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !72
25948 // CHECK12-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[TMP3]], align 4, !llvm.access.group !72
25949 // CHECK12-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !72
25950 // CHECK12-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i32 [[TMP24]]
25951 // CHECK12-NEXT:    [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4, !llvm.access.group !72
25952 // CHECK12-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP22]], [[TMP25]]
25953 // CHECK12-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP1]], align 4, !llvm.access.group !72
25954 // CHECK12-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !72
25955 // CHECK12-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i32 [[TMP27]]
25956 // CHECK12-NEXT:    store i32 [[ADD7]], i32* [[ARRAYIDX8]], align 4, !llvm.access.group !72
25957 // CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
25958 // CHECK12:       omp.body.continue:
25959 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
25960 // CHECK12:       omp.inner.for.inc:
25961 // CHECK12-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !72
25962 // CHECK12-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP28]], 1
25963 // CHECK12-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !72
25964 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP73:![0-9]+]]
25965 // CHECK12:       omp.inner.for.end:
25966 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
25967 // CHECK12:       omp.loop.exit:
25968 // CHECK12-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
25969 // CHECK12-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
25970 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
25971 // CHECK12-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
25972 // CHECK12-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
25973 // CHECK12-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
25974 // CHECK12:       .omp.final.then:
25975 // CHECK12-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
25976 // CHECK12-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
25977 // CHECK12-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
25978 // CHECK12-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
25979 // CHECK12-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
25980 // CHECK12-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
25981 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
25982 // CHECK12:       .omp.final.done:
25983 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
25984 // CHECK12:       omp.precond.end:
25985 // CHECK12-NEXT:    ret void
25986 //
25987 //
25988 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l58
25989 // CHECK12-SAME: (i32 [[CH:%.*]], i32 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
25990 // CHECK12-NEXT:  entry:
25991 // CHECK12-NEXT:    [[CH_ADDR:%.*]] = alloca i32, align 4
25992 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
25993 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
25994 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 4
25995 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 4
25996 // CHECK12-NEXT:    store i32 [[CH]], i32* [[CH_ADDR]], align 4
25997 // CHECK12-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
25998 // CHECK12-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
25999 // CHECK12-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 4
26000 // CHECK12-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 4
26001 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..34 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
26002 // CHECK12-NEXT:    ret void
26003 //
26004 //
26005 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..34
26006 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
26007 // CHECK12-NEXT:  entry:
26008 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
26009 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
26010 // CHECK12-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 4
26011 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
26012 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
26013 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
26014 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
26015 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
26016 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
26017 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
26018 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
26019 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
26020 // CHECK12-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
26021 // CHECK12-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
26022 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
26023 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
26024 // CHECK12-NEXT:    [[I3:%.*]] = alloca i32, align 4
26025 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
26026 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
26027 // CHECK12-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 4
26028 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
26029 // CHECK12-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
26030 // CHECK12-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
26031 // CHECK12-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
26032 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
26033 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
26034 // CHECK12-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
26035 // CHECK12-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
26036 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
26037 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP1]], align 4
26038 // CHECK12-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
26039 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
26040 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP6]], 0
26041 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
26042 // CHECK12-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
26043 // CHECK12-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
26044 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
26045 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
26046 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP7]]
26047 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
26048 // CHECK12:       omp.precond.then:
26049 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
26050 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26051 // CHECK12-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_COMB_UB]], align 4
26052 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
26053 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
26054 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP0]], align 4
26055 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
26056 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
26057 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP9]])
26058 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
26059 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26060 // CHECK12-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
26061 // CHECK12-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
26062 // CHECK12:       cond.true:
26063 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26064 // CHECK12-NEXT:    br label [[COND_END:%.*]]
26065 // CHECK12:       cond.false:
26066 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
26067 // CHECK12-NEXT:    br label [[COND_END]]
26068 // CHECK12:       cond.end:
26069 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
26070 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
26071 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
26072 // CHECK12-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
26073 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
26074 // CHECK12:       omp.inner.for.cond:
26075 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !75
26076 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !75
26077 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], 1
26078 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP17]], [[ADD]]
26079 // CHECK12-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
26080 // CHECK12:       omp.inner.for.body:
26081 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !75
26082 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !75
26083 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32**, i32**, i32**)* @.omp_outlined..35 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], i32** [[TMP2]], i32** [[TMP3]], i32** [[TMP4]]), !llvm.access.group !75
26084 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
26085 // CHECK12:       omp.inner.for.inc:
26086 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !75
26087 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !75
26088 // CHECK12-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
26089 // CHECK12-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !75
26090 // CHECK12-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !75
26091 // CHECK12-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !75
26092 // CHECK12-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
26093 // CHECK12-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !75
26094 // CHECK12-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !75
26095 // CHECK12-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !75
26096 // CHECK12-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
26097 // CHECK12-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !75
26098 // CHECK12-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !75
26099 // CHECK12-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !75
26100 // CHECK12-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[TMP27]], [[TMP28]]
26101 // CHECK12-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
26102 // CHECK12:       cond.true10:
26103 // CHECK12-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !75
26104 // CHECK12-NEXT:    br label [[COND_END12:%.*]]
26105 // CHECK12:       cond.false11:
26106 // CHECK12-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !75
26107 // CHECK12-NEXT:    br label [[COND_END12]]
26108 // CHECK12:       cond.end12:
26109 // CHECK12-NEXT:    [[COND13:%.*]] = phi i32 [ [[TMP29]], [[COND_TRUE10]] ], [ [[TMP30]], [[COND_FALSE11]] ]
26110 // CHECK12-NEXT:    store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !75
26111 // CHECK12-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !75
26112 // CHECK12-NEXT:    store i32 [[TMP31]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !75
26113 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP76:![0-9]+]]
26114 // CHECK12:       omp.inner.for.end:
26115 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
26116 // CHECK12:       omp.loop.exit:
26117 // CHECK12-NEXT:    [[TMP32:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
26118 // CHECK12-NEXT:    [[TMP33:%.*]] = load i32, i32* [[TMP32]], align 4
26119 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP33]])
26120 // CHECK12-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
26121 // CHECK12-NEXT:    [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
26122 // CHECK12-NEXT:    br i1 [[TMP35]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
26123 // CHECK12:       .omp.final.then:
26124 // CHECK12-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
26125 // CHECK12-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP36]], 0
26126 // CHECK12-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
26127 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV15]], 1
26128 // CHECK12-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL]]
26129 // CHECK12-NEXT:    store i32 [[ADD16]], i32* [[I3]], align 4
26130 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
26131 // CHECK12:       .omp.final.done:
26132 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
26133 // CHECK12:       omp.precond.end:
26134 // CHECK12-NEXT:    ret void
26135 //
26136 //
26137 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..35
26138 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
26139 // CHECK12-NEXT:  entry:
26140 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
26141 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
26142 // CHECK12-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
26143 // CHECK12-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
26144 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
26145 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
26146 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
26147 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
26148 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
26149 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
26150 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
26151 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
26152 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
26153 // CHECK12-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
26154 // CHECK12-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
26155 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
26156 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
26157 // CHECK12-NEXT:    [[I3:%.*]] = alloca i32, align 4
26158 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
26159 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
26160 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
26161 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
26162 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
26163 // CHECK12-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
26164 // CHECK12-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
26165 // CHECK12-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
26166 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
26167 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
26168 // CHECK12-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
26169 // CHECK12-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
26170 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
26171 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
26172 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
26173 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
26174 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
26175 // CHECK12-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
26176 // CHECK12-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
26177 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
26178 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
26179 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
26180 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
26181 // CHECK12:       omp.precond.then:
26182 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
26183 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26184 // CHECK12-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
26185 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
26186 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
26187 // CHECK12-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
26188 // CHECK12-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
26189 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
26190 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
26191 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
26192 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
26193 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
26194 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
26195 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26196 // CHECK12-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
26197 // CHECK12-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
26198 // CHECK12:       cond.true:
26199 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26200 // CHECK12-NEXT:    br label [[COND_END:%.*]]
26201 // CHECK12:       cond.false:
26202 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
26203 // CHECK12-NEXT:    br label [[COND_END]]
26204 // CHECK12:       cond.end:
26205 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
26206 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
26207 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
26208 // CHECK12-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
26209 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
26210 // CHECK12:       omp.inner.for.cond:
26211 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !78
26212 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !78
26213 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
26214 // CHECK12-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
26215 // CHECK12:       omp.inner.for.body:
26216 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !78
26217 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
26218 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
26219 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !78
26220 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[TMP2]], align 4, !llvm.access.group !78
26221 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !78
26222 // CHECK12-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i32 [[TMP21]]
26223 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !78
26224 // CHECK12-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[TMP3]], align 4, !llvm.access.group !78
26225 // CHECK12-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !78
26226 // CHECK12-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i32 [[TMP24]]
26227 // CHECK12-NEXT:    [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4, !llvm.access.group !78
26228 // CHECK12-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP22]], [[TMP25]]
26229 // CHECK12-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP1]], align 4, !llvm.access.group !78
26230 // CHECK12-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !78
26231 // CHECK12-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i32 [[TMP27]]
26232 // CHECK12-NEXT:    store i32 [[ADD7]], i32* [[ARRAYIDX8]], align 4, !llvm.access.group !78
26233 // CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
26234 // CHECK12:       omp.body.continue:
26235 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
26236 // CHECK12:       omp.inner.for.inc:
26237 // CHECK12-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !78
26238 // CHECK12-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP28]], 1
26239 // CHECK12-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !78
26240 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP79:![0-9]+]]
26241 // CHECK12:       omp.inner.for.end:
26242 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
26243 // CHECK12:       omp.loop.exit:
26244 // CHECK12-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
26245 // CHECK12-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
26246 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
26247 // CHECK12-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
26248 // CHECK12-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
26249 // CHECK12-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
26250 // CHECK12:       .omp.final.then:
26251 // CHECK12-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
26252 // CHECK12-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
26253 // CHECK12-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
26254 // CHECK12-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
26255 // CHECK12-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
26256 // CHECK12-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
26257 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
26258 // CHECK12:       .omp.final.done:
26259 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
26260 // CHECK12:       omp.precond.end:
26261 // CHECK12-NEXT:    ret void
26262 //
26263 //
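// The next three functions cover the target region at source line 66 of
// tmain<int>() (the _l66 suffix of the offloading entry encodes that line).
// Taken together, the checks assert that the entry stores the four captures
// (n, a, b, c) and calls __kmpc_fork_teams on .omp_outlined..38, which runs
// the distribute loop (__kmpc_for_static_init_4 with schedule kind 92,
// distribute static) and hands each chunk's bounds to .omp_outlined..39
// through __kmpc_fork_call. A source-level sketch of the kind of construct
// that produces this shape (an assumption for orientation only, not the exact
// test source; the access-group/loop metadata on the loops suggests a
// simd-annotated combined directive):
//
//   #pragma omp target teams distribute parallel for simd
//   for (int i = 0; i < n; ++i)
//     a[i] = b[i] + c[i];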
26264 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l66
26265 // CHECK12-SAME: (i32 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
26266 // CHECK12-NEXT:  entry:
26267 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
26268 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
26269 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 4
26270 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 4
26271 // CHECK12-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
26272 // CHECK12-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
26273 // CHECK12-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 4
26274 // CHECK12-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 4
26275 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..38 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
26276 // CHECK12-NEXT:    ret void
26277 //
26278 //
26279 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..38
26280 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
26281 // CHECK12-NEXT:  entry:
26282 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
26283 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
26284 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
26285 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
26286 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
26287 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
26288 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
26289 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
26290 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
26291 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
26292 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
26293 // CHECK12-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
26294 // CHECK12-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
26295 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
26296 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
26297 // CHECK12-NEXT:    [[I3:%.*]] = alloca i32, align 4
26298 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
26299 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
26300 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
26301 // CHECK12-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
26302 // CHECK12-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
26303 // CHECK12-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
26304 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
26305 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
26306 // CHECK12-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
26307 // CHECK12-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
26308 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
26309 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
26310 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
26311 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
26312 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
26313 // CHECK12-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
26314 // CHECK12-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
26315 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
26316 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
26317 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
26318 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
26319 // CHECK12:       omp.precond.then:
26320 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
26321 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26322 // CHECK12-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
26323 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
26324 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
26325 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
26326 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
26327 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
26328 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
26329 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26330 // CHECK12-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
26331 // CHECK12-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
26332 // CHECK12:       cond.true:
26333 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26334 // CHECK12-NEXT:    br label [[COND_END:%.*]]
26335 // CHECK12:       cond.false:
26336 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
26337 // CHECK12-NEXT:    br label [[COND_END]]
26338 // CHECK12:       cond.end:
26339 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
26340 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
26341 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
26342 // CHECK12-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
26343 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
26344 // CHECK12:       omp.inner.for.cond:
26345 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !81
26346 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !81
26347 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
26348 // CHECK12-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
26349 // CHECK12:       omp.inner.for.body:
26350 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !81
26351 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !81
26352 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32**, i32**, i32**)* @.omp_outlined..39 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], i32** [[TMP1]], i32** [[TMP2]], i32** [[TMP3]]), !llvm.access.group !81
26353 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
26354 // CHECK12:       omp.inner.for.inc:
26355 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !81
26356 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !81
26357 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
26358 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !81
26359 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP82:![0-9]+]]
26360 // CHECK12:       omp.inner.for.end:
26361 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
26362 // CHECK12:       omp.loop.exit:
26363 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
26364 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
26365 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
26366 // CHECK12-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
26367 // CHECK12-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
26368 // CHECK12-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
26369 // CHECK12:       .omp.final.then:
26370 // CHECK12-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
26371 // CHECK12-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
26372 // CHECK12-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
26373 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
26374 // CHECK12-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
26375 // CHECK12-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
26376 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
26377 // CHECK12:       .omp.final.done:
26378 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
26379 // CHECK12:       omp.precond.end:
26380 // CHECK12-NEXT:    ret void
26381 //
26382 //
26383 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..39
26384 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
26385 // CHECK12-NEXT:  entry:
26386 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
26387 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
26388 // CHECK12-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
26389 // CHECK12-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
26390 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
26391 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
26392 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
26393 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
26394 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
26395 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
26396 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
26397 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
26398 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
26399 // CHECK12-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
26400 // CHECK12-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
26401 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
26402 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
26403 // CHECK12-NEXT:    [[I3:%.*]] = alloca i32, align 4
26404 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
26405 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
26406 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
26407 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
26408 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
26409 // CHECK12-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
26410 // CHECK12-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
26411 // CHECK12-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
26412 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
26413 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
26414 // CHECK12-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
26415 // CHECK12-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
26416 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
26417 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
26418 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
26419 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
26420 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
26421 // CHECK12-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
26422 // CHECK12-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
26423 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
26424 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
26425 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
26426 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
26427 // CHECK12:       omp.precond.then:
26428 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
26429 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26430 // CHECK12-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
26431 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
26432 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
26433 // CHECK12-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
26434 // CHECK12-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
26435 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
26436 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
26437 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
26438 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
26439 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
26440 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
26441 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26442 // CHECK12-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
26443 // CHECK12-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
26444 // CHECK12:       cond.true:
26445 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26446 // CHECK12-NEXT:    br label [[COND_END:%.*]]
26447 // CHECK12:       cond.false:
26448 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
26449 // CHECK12-NEXT:    br label [[COND_END]]
26450 // CHECK12:       cond.end:
26451 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
26452 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
26453 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
26454 // CHECK12-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
26455 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
26456 // CHECK12:       omp.inner.for.cond:
26457 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !84
26458 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !84
26459 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
26460 // CHECK12-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
26461 // CHECK12:       omp.inner.for.body:
26462 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !84
26463 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
26464 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
26465 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !84
26466 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[TMP2]], align 4, !llvm.access.group !84
26467 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !84
26468 // CHECK12-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i32 [[TMP21]]
26469 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !84
26470 // CHECK12-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[TMP3]], align 4, !llvm.access.group !84
26471 // CHECK12-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !84
26472 // CHECK12-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i32 [[TMP24]]
26473 // CHECK12-NEXT:    [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4, !llvm.access.group !84
26474 // CHECK12-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP22]], [[TMP25]]
26475 // CHECK12-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP1]], align 4, !llvm.access.group !84
26476 // CHECK12-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !84
26477 // CHECK12-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i32 [[TMP27]]
26478 // CHECK12-NEXT:    store i32 [[ADD7]], i32* [[ARRAYIDX8]], align 4, !llvm.access.group !84
26479 // CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
26480 // CHECK12:       omp.body.continue:
26481 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
26482 // CHECK12:       omp.inner.for.inc:
26483 // CHECK12-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !84
26484 // CHECK12-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP28]], 1
26485 // CHECK12-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !84
26486 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP85:![0-9]+]]
26487 // CHECK12:       omp.inner.for.end:
26488 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
26489 // CHECK12:       omp.loop.exit:
26490 // CHECK12-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
26491 // CHECK12-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
26492 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
26493 // CHECK12-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
26494 // CHECK12-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
26495 // CHECK12-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
26496 // CHECK12:       .omp.final.then:
26497 // CHECK12-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
26498 // CHECK12-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
26499 // CHECK12-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
26500 // CHECK12-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
26501 // CHECK12-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
26502 // CHECK12-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
26503 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
26504 // CHECK12:       .omp.final.done:
26505 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
26506 // CHECK12:       omp.precond.end:
26507 // CHECK12-NEXT:    ret void
26508 //
26509 //
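// The target region at source line 74 differs from the _l66 one by a single
// extra capture: the scalar ch. The entry below forwards five captures to
// .omp_outlined..42, which snapshots ch into .capture_expr. and passes it by
// value as the seventh argument of the __kmpc_fork_call into
// .omp_outlined..43, where it becomes the chunk argument of
// __kmpc_for_static_init_4 with schedule kind 33 (chunked static). This
// presumably corresponds to a schedule(static, ch) clause on the inner
// worksharing loop of the combined construct; the distribute level itself
// still uses schedule kind 92.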
26510 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l74
26511 // CHECK12-SAME: (i32 [[CH:%.*]], i32 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
26512 // CHECK12-NEXT:  entry:
26513 // CHECK12-NEXT:    [[CH_ADDR:%.*]] = alloca i32, align 4
26514 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
26515 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
26516 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 4
26517 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 4
26518 // CHECK12-NEXT:    store i32 [[CH]], i32* [[CH_ADDR]], align 4
26519 // CHECK12-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
26520 // CHECK12-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
26521 // CHECK12-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 4
26522 // CHECK12-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 4
26523 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..42 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
26524 // CHECK12-NEXT:    ret void
26525 //
26526 //
26527 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..42
26528 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
26529 // CHECK12-NEXT:  entry:
26530 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
26531 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
26532 // CHECK12-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 4
26533 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
26534 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
26535 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
26536 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
26537 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
26538 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
26539 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
26540 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
26541 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
26542 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
26543 // CHECK12-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
26544 // CHECK12-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
26545 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
26546 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
26547 // CHECK12-NEXT:    [[I4:%.*]] = alloca i32, align 4
26548 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
26549 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
26550 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
26551 // CHECK12-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 4
26552 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
26553 // CHECK12-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
26554 // CHECK12-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
26555 // CHECK12-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
26556 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
26557 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
26558 // CHECK12-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
26559 // CHECK12-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
26560 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
26561 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
26562 // CHECK12-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
26563 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
26564 // CHECK12-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
26565 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26566 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
26567 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
26568 // CHECK12-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
26569 // CHECK12-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
26570 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
26571 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26572 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
26573 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
26574 // CHECK12:       omp.precond.then:
26575 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
26576 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
26577 // CHECK12-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
26578 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
26579 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
26580 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
26581 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
26582 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
26583 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
26584 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
26585 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
26586 // CHECK12-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
26587 // CHECK12:       cond.true:
26588 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
26589 // CHECK12-NEXT:    br label [[COND_END:%.*]]
26590 // CHECK12:       cond.false:
26591 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
26592 // CHECK12-NEXT:    br label [[COND_END]]
26593 // CHECK12:       cond.end:
26594 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
26595 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
26596 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
26597 // CHECK12-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
26598 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
26599 // CHECK12:       omp.inner.for.cond:
26600 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !87
26601 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !87
26602 // CHECK12-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
26603 // CHECK12-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
26604 // CHECK12:       omp.inner.for.body:
26605 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !87
26606 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !87
26607 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !87
26608 // CHECK12-NEXT:    store i32 [[TMP21]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !87
26609 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !87
26610 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32**, i32**, i32**, i32)* @.omp_outlined..43 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], i32** [[TMP2]], i32** [[TMP3]], i32** [[TMP4]], i32 [[TMP22]]), !llvm.access.group !87
26611 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
26612 // CHECK12:       omp.inner.for.inc:
26613 // CHECK12-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !87
26614 // CHECK12-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !87
26615 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
26616 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !87
26617 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP88:![0-9]+]]
26618 // CHECK12:       omp.inner.for.end:
26619 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
26620 // CHECK12:       omp.loop.exit:
26621 // CHECK12-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
26622 // CHECK12-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
26623 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
26624 // CHECK12-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
26625 // CHECK12-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
26626 // CHECK12-NEXT:    br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
26627 // CHECK12:       .omp.final.then:
26628 // CHECK12-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26629 // CHECK12-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP29]], 0
26630 // CHECK12-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
26631 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
26632 // CHECK12-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
26633 // CHECK12-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
26634 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
26635 // CHECK12:       .omp.final.done:
26636 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
26637 // CHECK12:       omp.precond.end:
26638 // CHECK12-NEXT:    ret void
26639 //
26640 //
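// .omp_outlined..43 below is the chunked-static worker for the _l74 region.
// A rough mirror of the asserted control flow, written as a sketch only
// (names lb/ub/prev_ub/stride stand for the .omp.lb/.omp.ub,
// .previous.ub. and .omp.stride slots in the IR):
//
//   // after __kmpc_for_static_init_4(..., /*schedtype=*/33, ..., /*chunk=*/ch)
//   for (;;) {                              // omp.dispatch.cond
//     if (ub > prev_ub) ub = prev_ub;       // unsigned clamp to the distribute chunk
//     if (lb > ub) break;                   // omp.dispatch.end
//     for (int i = lb; i <= ub; ++i)        // omp.inner.for.* (simd-annotated)
//       a[i] = b[i] + c[i];
//     lb += stride; ub += stride;           // omp.dispatch.inc
//   }
//
// i.e. each thread walks its static chunks of size ch, clamping every chunk's
// upper bound to the enclosing distribute chunk before running the inner loop.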
26641 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..43
26642 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
26643 // CHECK12-NEXT:  entry:
26644 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
26645 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
26646 // CHECK12-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
26647 // CHECK12-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
26648 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
26649 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
26650 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
26651 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
26652 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
26653 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
26654 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
26655 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
26656 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
26657 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
26658 // CHECK12-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
26659 // CHECK12-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
26660 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
26661 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
26662 // CHECK12-NEXT:    [[I4:%.*]] = alloca i32, align 4
26663 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
26664 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
26665 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
26666 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
26667 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
26668 // CHECK12-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
26669 // CHECK12-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
26670 // CHECK12-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
26671 // CHECK12-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
26672 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
26673 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
26674 // CHECK12-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
26675 // CHECK12-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
26676 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
26677 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
26678 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26679 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
26680 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
26681 // CHECK12-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
26682 // CHECK12-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
26683 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
26684 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26685 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
26686 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
26687 // CHECK12:       omp.precond.then:
26688 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
26689 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
26690 // CHECK12-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
26691 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
26692 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
26693 // CHECK12-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
26694 // CHECK12-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
26695 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
26696 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
26697 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
26698 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
26699 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
26700 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP12]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP10]])
26701 // CHECK12-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
26702 // CHECK12:       omp.dispatch.cond:
26703 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
26704 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
26705 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp ugt i32 [[TMP13]], [[TMP14]]
26706 // CHECK12-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
26707 // CHECK12:       cond.true:
26708 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
26709 // CHECK12-NEXT:    br label [[COND_END:%.*]]
26710 // CHECK12:       cond.false:
26711 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
26712 // CHECK12-NEXT:    br label [[COND_END]]
26713 // CHECK12:       cond.end:
26714 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP15]], [[COND_TRUE]] ], [ [[TMP16]], [[COND_FALSE]] ]
26715 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
26716 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
26717 // CHECK12-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
26718 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
26719 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
26720 // CHECK12-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
26721 // CHECK12-NEXT:    br i1 [[CMP6]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
26722 // CHECK12:       omp.dispatch.body:
26723 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
26724 // CHECK12:       omp.inner.for.cond:
26725 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !90
26726 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !90
26727 // CHECK12-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
26728 // CHECK12-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
26729 // CHECK12:       omp.inner.for.body:
26730 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !90
26731 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP22]], 1
26732 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
26733 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !90
26734 // CHECK12-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[TMP2]], align 4, !llvm.access.group !90
26735 // CHECK12-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !90
26736 // CHECK12-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i32 [[TMP24]]
26737 // CHECK12-NEXT:    [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !90
26738 // CHECK12-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP3]], align 4, !llvm.access.group !90
26739 // CHECK12-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !90
26740 // CHECK12-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i32 [[TMP27]]
26741 // CHECK12-NEXT:    [[TMP28:%.*]] = load i32, i32* [[ARRAYIDX8]], align 4, !llvm.access.group !90
26742 // CHECK12-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP25]], [[TMP28]]
26743 // CHECK12-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[TMP1]], align 4, !llvm.access.group !90
26744 // CHECK12-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !90
26745 // CHECK12-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds i32, i32* [[TMP29]], i32 [[TMP30]]
26746 // CHECK12-NEXT:    store i32 [[ADD9]], i32* [[ARRAYIDX10]], align 4, !llvm.access.group !90
26747 // CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
26748 // CHECK12:       omp.body.continue:
26749 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
26750 // CHECK12:       omp.inner.for.inc:
26751 // CHECK12-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !90
26752 // CHECK12-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP31]], 1
26753 // CHECK12-NEXT:    store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !90
26754 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP91:![0-9]+]]
26755 // CHECK12:       omp.inner.for.end:
26756 // CHECK12-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
26757 // CHECK12:       omp.dispatch.inc:
26758 // CHECK12-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
26759 // CHECK12-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
26760 // CHECK12-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP32]], [[TMP33]]
26761 // CHECK12-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_LB]], align 4
26762 // CHECK12-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
26763 // CHECK12-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
26764 // CHECK12-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP34]], [[TMP35]]
26765 // CHECK12-NEXT:    store i32 [[ADD13]], i32* [[DOTOMP_UB]], align 4
26766 // CHECK12-NEXT:    br label [[OMP_DISPATCH_COND]]
26767 // CHECK12:       omp.dispatch.end:
26768 // CHECK12-NEXT:    [[TMP36:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
26769 // CHECK12-NEXT:    [[TMP37:%.*]] = load i32, i32* [[TMP36]], align 4
26770 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP37]])
26771 // CHECK12-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
26772 // CHECK12-NEXT:    [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
26773 // CHECK12-NEXT:    br i1 [[TMP39]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
26774 // CHECK12:       .omp.final.then:
26775 // CHECK12-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26776 // CHECK12-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP40]], 0
26777 // CHECK12-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
26778 // CHECK12-NEXT:    [[MUL16:%.*]] = mul nsw i32 [[DIV15]], 1
26779 // CHECK12-NEXT:    [[ADD17:%.*]] = add nsw i32 0, [[MUL16]]
26780 // CHECK12-NEXT:    store i32 [[ADD17]], i32* [[I4]], align 4
26781 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
26782 // CHECK12:       .omp.final.done:
26783 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
26784 // CHECK12:       omp.precond.end:
26785 // CHECK12-NEXT:    ret void
26786 //
26787 //
26788 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l82
26789 // CHECK12-SAME: (i32 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
26790 // CHECK12-NEXT:  entry:
26791 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
26792 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
26793 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 4
26794 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 4
26795 // CHECK12-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
26796 // CHECK12-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
26797 // CHECK12-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 4
26798 // CHECK12-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 4
26799 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..46 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
26800 // CHECK12-NEXT:    ret void
26801 //
26802 //
26803 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..46
26804 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
26805 // CHECK12-NEXT:  entry:
26806 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
26807 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
26808 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
26809 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
26810 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
26811 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
26812 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
26813 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
26814 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
26815 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
26816 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
26817 // CHECK12-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
26818 // CHECK12-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
26819 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
26820 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
26821 // CHECK12-NEXT:    [[I3:%.*]] = alloca i32, align 4
26822 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
26823 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
26824 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
26825 // CHECK12-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
26826 // CHECK12-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
26827 // CHECK12-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
26828 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
26829 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
26830 // CHECK12-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
26831 // CHECK12-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
26832 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
26833 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
26834 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
26835 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
26836 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
26837 // CHECK12-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
26838 // CHECK12-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
26839 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
26840 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
26841 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
26842 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
26843 // CHECK12:       omp.precond.then:
26844 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
26845 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26846 // CHECK12-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
26847 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
26848 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
26849 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
26850 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
26851 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
26852 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
26853 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26854 // CHECK12-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
26855 // CHECK12-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
26856 // CHECK12:       cond.true:
26857 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26858 // CHECK12-NEXT:    br label [[COND_END:%.*]]
26859 // CHECK12:       cond.false:
26860 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
26861 // CHECK12-NEXT:    br label [[COND_END]]
26862 // CHECK12:       cond.end:
26863 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
26864 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
26865 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
26866 // CHECK12-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
26867 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
26868 // CHECK12:       omp.inner.for.cond:
26869 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !93
26870 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !93
26871 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
26872 // CHECK12-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
26873 // CHECK12:       omp.inner.for.body:
26874 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !93
26875 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !93
26876 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32**, i32**, i32**)* @.omp_outlined..47 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], i32** [[TMP1]], i32** [[TMP2]], i32** [[TMP3]]), !llvm.access.group !93
26877 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
26878 // CHECK12:       omp.inner.for.inc:
26879 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !93
26880 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !93
26881 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
26882 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !93
26883 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP94:![0-9]+]]
26884 // CHECK12:       omp.inner.for.end:
26885 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
26886 // CHECK12:       omp.loop.exit:
26887 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
26888 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
26889 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
26890 // CHECK12-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
26891 // CHECK12-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
26892 // CHECK12-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
26893 // CHECK12:       .omp.final.then:
26894 // CHECK12-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
26895 // CHECK12-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
26896 // CHECK12-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
26897 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
26898 // CHECK12-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
26899 // CHECK12-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
26900 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
26901 // CHECK12:       .omp.final.done:
26902 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
26903 // CHECK12:       omp.precond.end:
26904 // CHECK12-NEXT:    ret void
26905 //
26906 //
26907 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..47
26908 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
26909 // CHECK12-NEXT:  entry:
26910 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
26911 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
26912 // CHECK12-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
26913 // CHECK12-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
26914 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
26915 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
26916 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
26917 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
26918 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
26919 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
26920 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
26921 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
26922 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
26923 // CHECK12-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
26924 // CHECK12-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
26925 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
26926 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
26927 // CHECK12-NEXT:    [[I3:%.*]] = alloca i32, align 4
26928 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
26929 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
26930 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
26931 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
26932 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
26933 // CHECK12-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
26934 // CHECK12-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
26935 // CHECK12-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
26936 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
26937 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
26938 // CHECK12-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
26939 // CHECK12-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
26940 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
26941 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
26942 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
26943 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
26944 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
26945 // CHECK12-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
26946 // CHECK12-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
26947 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
26948 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
26949 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
26950 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
26951 // CHECK12:       omp.precond.then:
26952 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
26953 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26954 // CHECK12-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
26955 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
26956 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
26957 // CHECK12-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
26958 // CHECK12-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
26959 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
26960 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
26961 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
26962 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
26963 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
26964 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
26965 // CHECK12-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32 35, i32 [[TMP10]], i32 [[TMP11]], i32 1, i32 1)
26966 // CHECK12-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
26967 // CHECK12:       omp.dispatch.cond:
26968 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
26969 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4
26970 // CHECK12-NEXT:    [[TMP16:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP15]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
26971 // CHECK12-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP16]], 0
26972 // CHECK12-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
26973 // CHECK12:       omp.dispatch.body:
26974 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
26975 // CHECK12-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
26976 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
26977 // CHECK12:       omp.inner.for.cond:
26978 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !96
26979 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !96
26980 // CHECK12-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
26981 // CHECK12-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
26982 // CHECK12:       omp.inner.for.body:
26983 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !96
26984 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP20]], 1
26985 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
26986 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !96
26987 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[TMP2]], align 4, !llvm.access.group !96
26988 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !96
26989 // CHECK12-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP21]], i32 [[TMP22]]
26990 // CHECK12-NEXT:    [[TMP23:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !96
26991 // CHECK12-NEXT:    [[TMP24:%.*]] = load i32*, i32** [[TMP3]], align 4, !llvm.access.group !96
26992 // CHECK12-NEXT:    [[TMP25:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !96
26993 // CHECK12-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, i32* [[TMP24]], i32 [[TMP25]]
26994 // CHECK12-NEXT:    [[TMP26:%.*]] = load i32, i32* [[ARRAYIDX5]], align 4, !llvm.access.group !96
26995 // CHECK12-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP23]], [[TMP26]]
26996 // CHECK12-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[TMP1]], align 4, !llvm.access.group !96
26997 // CHECK12-NEXT:    [[TMP28:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !96
26998 // CHECK12-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i32, i32* [[TMP27]], i32 [[TMP28]]
26999 // CHECK12-NEXT:    store i32 [[ADD6]], i32* [[ARRAYIDX7]], align 4, !llvm.access.group !96
27000 // CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
27001 // CHECK12:       omp.body.continue:
27002 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
27003 // CHECK12:       omp.inner.for.inc:
27004 // CHECK12-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !96
27005 // CHECK12-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP29]], 1
27006 // CHECK12-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !96
27007 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP97:![0-9]+]]
27008 // CHECK12:       omp.inner.for.end:
27009 // CHECK12-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
27010 // CHECK12:       omp.dispatch.inc:
27011 // CHECK12-NEXT:    br label [[OMP_DISPATCH_COND]]
27012 // CHECK12:       omp.dispatch.end:
27013 // CHECK12-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
27014 // CHECK12-NEXT:    [[TMP31:%.*]] = icmp ne i32 [[TMP30]], 0
27015 // CHECK12-NEXT:    br i1 [[TMP31]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
27016 // CHECK12:       .omp.final.then:
27017 // CHECK12-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
27018 // CHECK12-NEXT:    [[SUB9:%.*]] = sub nsw i32 [[TMP32]], 0
27019 // CHECK12-NEXT:    [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
27020 // CHECK12-NEXT:    [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
27021 // CHECK12-NEXT:    [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
27022 // CHECK12-NEXT:    store i32 [[ADD12]], i32* [[I3]], align 4
27023 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
27024 // CHECK12:       .omp.final.done:
27025 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
27026 // CHECK12:       omp.precond.end:
27027 // CHECK12-NEXT:    ret void
27028 //
27029 //
27030 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l90
27031 // CHECK12-SAME: (i32 [[CH:%.*]], i32 [[N:%.*]], i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) #[[ATTR1]] {
27032 // CHECK12-NEXT:  entry:
27033 // CHECK12-NEXT:    [[CH_ADDR:%.*]] = alloca i32, align 4
27034 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
27035 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
27036 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 4
27037 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 4
27038 // CHECK12-NEXT:    store i32 [[CH]], i32* [[CH_ADDR]], align 4
27039 // CHECK12-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
27040 // CHECK12-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
27041 // CHECK12-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 4
27042 // CHECK12-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 4
27043 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..50 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
27044 // CHECK12-NEXT:    ret void
27045 //
27046 //
27047 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..50
27048 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
27049 // CHECK12-NEXT:  entry:
27050 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
27051 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
27052 // CHECK12-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 4
27053 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
27054 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
27055 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
27056 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
27057 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
27058 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
27059 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
27060 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
27061 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
27062 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
27063 // CHECK12-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
27064 // CHECK12-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
27065 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
27066 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
27067 // CHECK12-NEXT:    [[I4:%.*]] = alloca i32, align 4
27068 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
27069 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
27070 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
27071 // CHECK12-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 4
27072 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
27073 // CHECK12-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
27074 // CHECK12-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
27075 // CHECK12-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
27076 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
27077 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
27078 // CHECK12-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
27079 // CHECK12-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
27080 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
27081 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
27082 // CHECK12-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
27083 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
27084 // CHECK12-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
27085 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
27086 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
27087 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
27088 // CHECK12-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
27089 // CHECK12-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
27090 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
27091 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
27092 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
27093 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
27094 // CHECK12:       omp.precond.then:
27095 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
27096 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
27097 // CHECK12-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
27098 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
27099 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
27100 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
27101 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
27102 // CHECK12-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
27103 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
27104 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
27105 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
27106 // CHECK12-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
27107 // CHECK12:       cond.true:
27108 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
27109 // CHECK12-NEXT:    br label [[COND_END:%.*]]
27110 // CHECK12:       cond.false:
27111 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
27112 // CHECK12-NEXT:    br label [[COND_END]]
27113 // CHECK12:       cond.end:
27114 // CHECK12-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
27115 // CHECK12-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
27116 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
27117 // CHECK12-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
27118 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
27119 // CHECK12:       omp.inner.for.cond:
27120 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !99
27121 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !99
27122 // CHECK12-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
27123 // CHECK12-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
27124 // CHECK12:       omp.inner.for.body:
27125 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !99
27126 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !99
27127 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !99
27128 // CHECK12-NEXT:    store i32 [[TMP21]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !99
27129 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !99
27130 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32**, i32**, i32**, i32)* @.omp_outlined..51 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], i32** [[TMP2]], i32** [[TMP3]], i32** [[TMP4]], i32 [[TMP22]]), !llvm.access.group !99
27131 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
27132 // CHECK12:       omp.inner.for.inc:
27133 // CHECK12-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !99
27134 // CHECK12-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !99
27135 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
27136 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !99
27137 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP100:![0-9]+]]
27138 // CHECK12:       omp.inner.for.end:
27139 // CHECK12-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
27140 // CHECK12:       omp.loop.exit:
27141 // CHECK12-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
27142 // CHECK12-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
27143 // CHECK12-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
27144 // CHECK12-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
27145 // CHECK12-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
27146 // CHECK12-NEXT:    br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
27147 // CHECK12:       .omp.final.then:
27148 // CHECK12-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
27149 // CHECK12-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP29]], 0
27150 // CHECK12-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
27151 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
27152 // CHECK12-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
27153 // CHECK12-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
27154 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
27155 // CHECK12:       .omp.final.done:
27156 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
27157 // CHECK12:       omp.precond.end:
27158 // CHECK12-NEXT:    ret void
27159 //
27160 //
27161 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..51
27162 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], i32** nonnull align 4 dereferenceable(4) [[A:%.*]], i32** nonnull align 4 dereferenceable(4) [[B:%.*]], i32** nonnull align 4 dereferenceable(4) [[C:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
27163 // CHECK12-NEXT:  entry:
27164 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
27165 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
27166 // CHECK12-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
27167 // CHECK12-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
27168 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
27169 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
27170 // CHECK12-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
27171 // CHECK12-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
27172 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
27173 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
27174 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
27175 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
27176 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
27177 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
27178 // CHECK12-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
27179 // CHECK12-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
27180 // CHECK12-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
27181 // CHECK12-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
27182 // CHECK12-NEXT:    [[I4:%.*]] = alloca i32, align 4
27183 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
27184 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
27185 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
27186 // CHECK12-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
27187 // CHECK12-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
27188 // CHECK12-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
27189 // CHECK12-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
27190 // CHECK12-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
27191 // CHECK12-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
27192 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
27193 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
27194 // CHECK12-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
27195 // CHECK12-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
27196 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
27197 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
27198 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
27199 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
27200 // CHECK12-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
27201 // CHECK12-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
27202 // CHECK12-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
27203 // CHECK12-NEXT:    store i32 0, i32* [[I]], align 4
27204 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
27205 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
27206 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
27207 // CHECK12:       omp.precond.then:
27208 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
27209 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
27210 // CHECK12-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
27211 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
27212 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
27213 // CHECK12-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
27214 // CHECK12-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
27215 // CHECK12-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
27216 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
27217 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
27218 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
27219 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
27220 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
27221 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
27222 // CHECK12-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32 35, i32 [[TMP11]], i32 [[TMP12]], i32 1, i32 [[TMP10]])
27223 // CHECK12-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
27224 // CHECK12:       omp.dispatch.cond:
27225 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
27226 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4
27227 // CHECK12-NEXT:    [[TMP17:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP16]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
27228 // CHECK12-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP17]], 0
27229 // CHECK12-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
27230 // CHECK12:       omp.dispatch.body:
27231 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
27232 // CHECK12-NEXT:    store i32 [[TMP18]], i32* [[DOTOMP_IV]], align 4
27233 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
27234 // CHECK12:       omp.inner.for.cond:
27235 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !102
27236 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !102
27237 // CHECK12-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
27238 // CHECK12-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
27239 // CHECK12:       omp.inner.for.body:
27240 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !102
27241 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP21]], 1
27242 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
27243 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !102
27244 // CHECK12-NEXT:    [[TMP22:%.*]] = load i32*, i32** [[TMP2]], align 4, !llvm.access.group !102
27245 // CHECK12-NEXT:    [[TMP23:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !102
27246 // CHECK12-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP22]], i32 [[TMP23]]
27247 // CHECK12-NEXT:    [[TMP24:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !102
27248 // CHECK12-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[TMP3]], align 4, !llvm.access.group !102
27249 // CHECK12-NEXT:    [[TMP26:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !102
27250 // CHECK12-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[TMP25]], i32 [[TMP26]]
27251 // CHECK12-NEXT:    [[TMP27:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4, !llvm.access.group !102
27252 // CHECK12-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP24]], [[TMP27]]
27253 // CHECK12-NEXT:    [[TMP28:%.*]] = load i32*, i32** [[TMP1]], align 4, !llvm.access.group !102
27254 // CHECK12-NEXT:    [[TMP29:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !102
27255 // CHECK12-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP28]], i32 [[TMP29]]
27256 // CHECK12-NEXT:    store i32 [[ADD7]], i32* [[ARRAYIDX8]], align 4, !llvm.access.group !102
27257 // CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
27258 // CHECK12:       omp.body.continue:
27259 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
27260 // CHECK12:       omp.inner.for.inc:
27261 // CHECK12-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !102
27262 // CHECK12-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP30]], 1
27263 // CHECK12-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !102
27264 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP103:![0-9]+]]
27265 // CHECK12:       omp.inner.for.end:
27266 // CHECK12-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
27267 // CHECK12:       omp.dispatch.inc:
27268 // CHECK12-NEXT:    br label [[OMP_DISPATCH_COND]]
27269 // CHECK12:       omp.dispatch.end:
27270 // CHECK12-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
27271 // CHECK12-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
27272 // CHECK12-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
27273 // CHECK12:       .omp.final.then:
27274 // CHECK12-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
27275 // CHECK12-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
27276 // CHECK12-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
27277 // CHECK12-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
27278 // CHECK12-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
27279 // CHECK12-NEXT:    store i32 [[ADD13]], i32* [[I4]], align 4
27280 // CHECK12-NEXT:    br label [[DOTOMP_FINAL_DONE]]
27281 // CHECK12:       .omp.final.done:
27282 // CHECK12-NEXT:    br label [[OMP_PRECOND_END]]
27283 // CHECK12:       omp.precond.end:
27284 // CHECK12-NEXT:    ret void
27285 //
27286 //
27287 // CHECK12-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
27288 // CHECK12-SAME: () #[[ATTR4:[0-9]+]] {
27289 // CHECK12-NEXT:  entry:
27290 // CHECK12-NEXT:    call void @__tgt_register_requires(i64 1)
27291 // CHECK12-NEXT:    ret void
27292 //
27293 //
27294 // CHECK13-LABEL: define {{[^@]+}}@main
27295 // CHECK13-SAME: () #[[ATTR0:[0-9]+]] {
27296 // CHECK13-NEXT:  entry:
27297 // CHECK13-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
27298 // CHECK13-NEXT:    [[A:%.*]] = alloca double*, align 8
27299 // CHECK13-NEXT:    [[B:%.*]] = alloca double*, align 8
27300 // CHECK13-NEXT:    [[C:%.*]] = alloca double*, align 8
27301 // CHECK13-NEXT:    [[N:%.*]] = alloca i32, align 4
27302 // CHECK13-NEXT:    [[CH:%.*]] = alloca i32, align 4
27303 // CHECK13-NEXT:    [[TMP:%.*]] = alloca i32, align 4
27304 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
27305 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
27306 // CHECK13-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
27307 // CHECK13-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
27308 // CHECK13-NEXT:    [[I:%.*]] = alloca i32, align 4
27309 // CHECK13-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
27310 // CHECK13-NEXT:    [[I3:%.*]] = alloca i32, align 4
27311 // CHECK13-NEXT:    [[_TMP15:%.*]] = alloca i32, align 4
27312 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_16:%.*]] = alloca i32, align 4
27313 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4
27314 // CHECK13-NEXT:    [[DOTOMP_LB21:%.*]] = alloca i32, align 4
27315 // CHECK13-NEXT:    [[DOTOMP_UB22:%.*]] = alloca i32, align 4
27316 // CHECK13-NEXT:    [[I23:%.*]] = alloca i32, align 4
27317 // CHECK13-NEXT:    [[DOTOMP_IV26:%.*]] = alloca i32, align 4
27318 // CHECK13-NEXT:    [[I27:%.*]] = alloca i32, align 4
27319 // CHECK13-NEXT:    [[_TMP49:%.*]] = alloca i32, align 4
27320 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_50:%.*]] = alloca i32, align 4
27321 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_51:%.*]] = alloca i32, align 4
27322 // CHECK13-NEXT:    [[DOTOMP_LB55:%.*]] = alloca i32, align 4
27323 // CHECK13-NEXT:    [[DOTOMP_UB56:%.*]] = alloca i32, align 4
27324 // CHECK13-NEXT:    [[I57:%.*]] = alloca i32, align 4
27325 // CHECK13-NEXT:    [[DOTOMP_IV60:%.*]] = alloca i32, align 4
27326 // CHECK13-NEXT:    [[I61:%.*]] = alloca i32, align 4
27327 // CHECK13-NEXT:    [[_TMP83:%.*]] = alloca i32, align 4
27328 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_84:%.*]] = alloca i32, align 4
27329 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_85:%.*]] = alloca i32, align 4
27330 // CHECK13-NEXT:    [[DOTOMP_LB89:%.*]] = alloca i32, align 4
27331 // CHECK13-NEXT:    [[DOTOMP_UB90:%.*]] = alloca i32, align 4
27332 // CHECK13-NEXT:    [[I91:%.*]] = alloca i32, align 4
27333 // CHECK13-NEXT:    [[DOTOMP_IV94:%.*]] = alloca i32, align 4
27334 // CHECK13-NEXT:    [[I95:%.*]] = alloca i32, align 4
27335 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_117:%.*]] = alloca i32, align 4
27336 // CHECK13-NEXT:    [[_TMP118:%.*]] = alloca i32, align 4
27337 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_119:%.*]] = alloca i32, align 4
27338 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_120:%.*]] = alloca i32, align 4
27339 // CHECK13-NEXT:    [[DOTOMP_LB124:%.*]] = alloca i32, align 4
27340 // CHECK13-NEXT:    [[DOTOMP_UB125:%.*]] = alloca i32, align 4
27341 // CHECK13-NEXT:    [[I126:%.*]] = alloca i32, align 4
27342 // CHECK13-NEXT:    [[DOTOMP_IV129:%.*]] = alloca i32, align 4
27343 // CHECK13-NEXT:    [[I130:%.*]] = alloca i32, align 4
27344 // CHECK13-NEXT:    [[_TMP152:%.*]] = alloca i32, align 4
27345 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_153:%.*]] = alloca i32, align 4
27346 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_154:%.*]] = alloca i32, align 4
27347 // CHECK13-NEXT:    [[DOTOMP_LB158:%.*]] = alloca i32, align 4
27348 // CHECK13-NEXT:    [[DOTOMP_UB159:%.*]] = alloca i32, align 4
27349 // CHECK13-NEXT:    [[I160:%.*]] = alloca i32, align 4
27350 // CHECK13-NEXT:    [[DOTOMP_IV163:%.*]] = alloca i32, align 4
27351 // CHECK13-NEXT:    [[I164:%.*]] = alloca i32, align 4
27352 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_186:%.*]] = alloca i32, align 4
27353 // CHECK13-NEXT:    [[_TMP187:%.*]] = alloca i32, align 4
27354 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_188:%.*]] = alloca i32, align 4
27355 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_189:%.*]] = alloca i32, align 4
27356 // CHECK13-NEXT:    [[DOTOMP_LB193:%.*]] = alloca i32, align 4
27357 // CHECK13-NEXT:    [[DOTOMP_UB194:%.*]] = alloca i32, align 4
27358 // CHECK13-NEXT:    [[I195:%.*]] = alloca i32, align 4
27359 // CHECK13-NEXT:    [[DOTOMP_IV198:%.*]] = alloca i32, align 4
27360 // CHECK13-NEXT:    [[I199:%.*]] = alloca i32, align 4
27361 // CHECK13-NEXT:    store i32 0, i32* [[RETVAL]], align 4
27362 // CHECK13-NEXT:    store i32 10000, i32* [[N]], align 4
27363 // CHECK13-NEXT:    store i32 100, i32* [[CH]], align 4
27364 // CHECK13-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
27365 // CHECK13-NEXT:    store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
27366 // CHECK13-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
27367 // CHECK13-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP1]], 0
27368 // CHECK13-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
27369 // CHECK13-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
27370 // CHECK13-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
27371 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
27372 // CHECK13-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
27373 // CHECK13-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
27374 // CHECK13-NEXT:    store i32 0, i32* [[I]], align 4
27375 // CHECK13-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
27376 // CHECK13-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
27377 // CHECK13-NEXT:    br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
27378 // CHECK13:       simd.if.then:
27379 // CHECK13-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
27380 // CHECK13-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
27381 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
27382 // CHECK13:       omp.inner.for.cond:
27383 // CHECK13-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
27384 // CHECK13-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !2
27385 // CHECK13-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
27386 // CHECK13-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
27387 // CHECK13:       omp.inner.for.body:
27388 // CHECK13-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
27389 // CHECK13-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
27390 // CHECK13-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
27391 // CHECK13-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !2
27392 // CHECK13-NEXT:    [[TMP8:%.*]] = load double*, double** [[B]], align 8, !llvm.access.group !2
27393 // CHECK13-NEXT:    [[TMP9:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !2
27394 // CHECK13-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP9]] to i64
27395 // CHECK13-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP8]], i64 [[IDXPROM]]
27396 // CHECK13-NEXT:    [[TMP10:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !2
27397 // CHECK13-NEXT:    [[TMP11:%.*]] = load double*, double** [[C]], align 8, !llvm.access.group !2
27398 // CHECK13-NEXT:    [[TMP12:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !2
27399 // CHECK13-NEXT:    [[IDXPROM5:%.*]] = sext i32 [[TMP12]] to i64
27400 // CHECK13-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP11]], i64 [[IDXPROM5]]
27401 // CHECK13-NEXT:    [[TMP13:%.*]] = load double, double* [[ARRAYIDX6]], align 8, !llvm.access.group !2
27402 // CHECK13-NEXT:    [[ADD7:%.*]] = fadd double [[TMP10]], [[TMP13]]
27403 // CHECK13-NEXT:    [[TMP14:%.*]] = load double*, double** [[A]], align 8, !llvm.access.group !2
27404 // CHECK13-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !2
27405 // CHECK13-NEXT:    [[IDXPROM8:%.*]] = sext i32 [[TMP15]] to i64
27406 // CHECK13-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds double, double* [[TMP14]], i64 [[IDXPROM8]]
27407 // CHECK13-NEXT:    store double [[ADD7]], double* [[ARRAYIDX9]], align 8, !llvm.access.group !2
27408 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
27409 // CHECK13:       omp.body.continue:
27410 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
27411 // CHECK13:       omp.inner.for.inc:
27412 // CHECK13-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
27413 // CHECK13-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP16]], 1
27414 // CHECK13-NEXT:    store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
27415 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
27416 // CHECK13:       omp.inner.for.end:
27417 // CHECK13-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
27418 // CHECK13-NEXT:    [[SUB11:%.*]] = sub nsw i32 [[TMP17]], 0
27419 // CHECK13-NEXT:    [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1
27420 // CHECK13-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[DIV12]], 1
27421 // CHECK13-NEXT:    [[ADD14:%.*]] = add nsw i32 0, [[MUL13]]
27422 // CHECK13-NEXT:    store i32 [[ADD14]], i32* [[I3]], align 4
27423 // CHECK13-NEXT:    br label [[SIMD_IF_END]]
27424 // CHECK13:       simd.if.end:
27425 // CHECK13-NEXT:    [[TMP18:%.*]] = load i32, i32* [[N]], align 4
27426 // CHECK13-NEXT:    store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR_16]], align 4
27427 // CHECK13-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4
27428 // CHECK13-NEXT:    [[SUB18:%.*]] = sub nsw i32 [[TMP19]], 0
27429 // CHECK13-NEXT:    [[DIV19:%.*]] = sdiv i32 [[SUB18]], 1
27430 // CHECK13-NEXT:    [[SUB20:%.*]] = sub nsw i32 [[DIV19]], 1
27431 // CHECK13-NEXT:    store i32 [[SUB20]], i32* [[DOTCAPTURE_EXPR_17]], align 4
27432 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB21]], align 4
27433 // CHECK13-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4
27434 // CHECK13-NEXT:    store i32 [[TMP20]], i32* [[DOTOMP_UB22]], align 4
27435 // CHECK13-NEXT:    store i32 0, i32* [[I23]], align 4
27436 // CHECK13-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4
27437 // CHECK13-NEXT:    [[CMP24:%.*]] = icmp slt i32 0, [[TMP21]]
27438 // CHECK13-NEXT:    br i1 [[CMP24]], label [[SIMD_IF_THEN25:%.*]], label [[SIMD_IF_END48:%.*]]
27439 // CHECK13:       simd.if.then25:
27440 // CHECK13-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_LB21]], align 4
27441 // CHECK13-NEXT:    store i32 [[TMP22]], i32* [[DOTOMP_IV26]], align 4
27442 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND28:%.*]]
27443 // CHECK13:       omp.inner.for.cond28:
27444 // CHECK13-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV26]], align 4, !llvm.access.group !6
27445 // CHECK13-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_UB22]], align 4, !llvm.access.group !6
27446 // CHECK13-NEXT:    [[CMP29:%.*]] = icmp sle i32 [[TMP23]], [[TMP24]]
27447 // CHECK13-NEXT:    br i1 [[CMP29]], label [[OMP_INNER_FOR_BODY30:%.*]], label [[OMP_INNER_FOR_END43:%.*]]
27448 // CHECK13:       omp.inner.for.body30:
27449 // CHECK13-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV26]], align 4, !llvm.access.group !6
27450 // CHECK13-NEXT:    [[MUL31:%.*]] = mul nsw i32 [[TMP25]], 1
27451 // CHECK13-NEXT:    [[ADD32:%.*]] = add nsw i32 0, [[MUL31]]
27452 // CHECK13-NEXT:    store i32 [[ADD32]], i32* [[I27]], align 4, !llvm.access.group !6
27453 // CHECK13-NEXT:    [[TMP26:%.*]] = load double*, double** [[B]], align 8, !llvm.access.group !6
27454 // CHECK13-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I27]], align 4, !llvm.access.group !6
27455 // CHECK13-NEXT:    [[IDXPROM33:%.*]] = sext i32 [[TMP27]] to i64
27456 // CHECK13-NEXT:    [[ARRAYIDX34:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM33]]
27457 // CHECK13-NEXT:    [[TMP28:%.*]] = load double, double* [[ARRAYIDX34]], align 8, !llvm.access.group !6
27458 // CHECK13-NEXT:    [[TMP29:%.*]] = load double*, double** [[C]], align 8, !llvm.access.group !6
27459 // CHECK13-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I27]], align 4, !llvm.access.group !6
27460 // CHECK13-NEXT:    [[IDXPROM35:%.*]] = sext i32 [[TMP30]] to i64
27461 // CHECK13-NEXT:    [[ARRAYIDX36:%.*]] = getelementptr inbounds double, double* [[TMP29]], i64 [[IDXPROM35]]
27462 // CHECK13-NEXT:    [[TMP31:%.*]] = load double, double* [[ARRAYIDX36]], align 8, !llvm.access.group !6
27463 // CHECK13-NEXT:    [[ADD37:%.*]] = fadd double [[TMP28]], [[TMP31]]
27464 // CHECK13-NEXT:    [[TMP32:%.*]] = load double*, double** [[A]], align 8, !llvm.access.group !6
27465 // CHECK13-NEXT:    [[TMP33:%.*]] = load i32, i32* [[I27]], align 4, !llvm.access.group !6
27466 // CHECK13-NEXT:    [[IDXPROM38:%.*]] = sext i32 [[TMP33]] to i64
27467 // CHECK13-NEXT:    [[ARRAYIDX39:%.*]] = getelementptr inbounds double, double* [[TMP32]], i64 [[IDXPROM38]]
27468 // CHECK13-NEXT:    store double [[ADD37]], double* [[ARRAYIDX39]], align 8, !llvm.access.group !6
27469 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE40:%.*]]
27470 // CHECK13:       omp.body.continue40:
27471 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC41:%.*]]
27472 // CHECK13:       omp.inner.for.inc41:
27473 // CHECK13-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV26]], align 4, !llvm.access.group !6
27474 // CHECK13-NEXT:    [[ADD42:%.*]] = add nsw i32 [[TMP34]], 1
27475 // CHECK13-NEXT:    store i32 [[ADD42]], i32* [[DOTOMP_IV26]], align 4, !llvm.access.group !6
27476 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND28]], !llvm.loop [[LOOP7:![0-9]+]]
27477 // CHECK13:       omp.inner.for.end43:
27478 // CHECK13-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4
27479 // CHECK13-NEXT:    [[SUB44:%.*]] = sub nsw i32 [[TMP35]], 0
27480 // CHECK13-NEXT:    [[DIV45:%.*]] = sdiv i32 [[SUB44]], 1
27481 // CHECK13-NEXT:    [[MUL46:%.*]] = mul nsw i32 [[DIV45]], 1
27482 // CHECK13-NEXT:    [[ADD47:%.*]] = add nsw i32 0, [[MUL46]]
27483 // CHECK13-NEXT:    store i32 [[ADD47]], i32* [[I27]], align 4
27484 // CHECK13-NEXT:    br label [[SIMD_IF_END48]]
27485 // CHECK13:       simd.if.end48:
27486 // CHECK13-NEXT:    [[TMP36:%.*]] = load i32, i32* [[N]], align 4
27487 // CHECK13-NEXT:    store i32 [[TMP36]], i32* [[DOTCAPTURE_EXPR_50]], align 4
27488 // CHECK13-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4
27489 // CHECK13-NEXT:    [[SUB52:%.*]] = sub nsw i32 [[TMP37]], 0
27490 // CHECK13-NEXT:    [[DIV53:%.*]] = sdiv i32 [[SUB52]], 1
27491 // CHECK13-NEXT:    [[SUB54:%.*]] = sub nsw i32 [[DIV53]], 1
27492 // CHECK13-NEXT:    store i32 [[SUB54]], i32* [[DOTCAPTURE_EXPR_51]], align 4
27493 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB55]], align 4
27494 // CHECK13-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_51]], align 4
27495 // CHECK13-NEXT:    store i32 [[TMP38]], i32* [[DOTOMP_UB56]], align 4
27496 // CHECK13-NEXT:    store i32 0, i32* [[I57]], align 4
27497 // CHECK13-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4
27498 // CHECK13-NEXT:    [[CMP58:%.*]] = icmp slt i32 0, [[TMP39]]
27499 // CHECK13-NEXT:    br i1 [[CMP58]], label [[SIMD_IF_THEN59:%.*]], label [[SIMD_IF_END82:%.*]]
27500 // CHECK13:       simd.if.then59:
27501 // CHECK13-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_LB55]], align 4
27502 // CHECK13-NEXT:    store i32 [[TMP40]], i32* [[DOTOMP_IV60]], align 4
27503 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND62:%.*]]
27504 // CHECK13:       omp.inner.for.cond62:
27505 // CHECK13-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_IV60]], align 4, !llvm.access.group !9
27506 // CHECK13-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_UB56]], align 4, !llvm.access.group !9
27507 // CHECK13-NEXT:    [[CMP63:%.*]] = icmp sle i32 [[TMP41]], [[TMP42]]
27508 // CHECK13-NEXT:    br i1 [[CMP63]], label [[OMP_INNER_FOR_BODY64:%.*]], label [[OMP_INNER_FOR_END77:%.*]]
27509 // CHECK13:       omp.inner.for.body64:
27510 // CHECK13-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV60]], align 4, !llvm.access.group !9
27511 // CHECK13-NEXT:    [[MUL65:%.*]] = mul nsw i32 [[TMP43]], 1
27512 // CHECK13-NEXT:    [[ADD66:%.*]] = add nsw i32 0, [[MUL65]]
27513 // CHECK13-NEXT:    store i32 [[ADD66]], i32* [[I61]], align 4, !llvm.access.group !9
27514 // CHECK13-NEXT:    [[TMP44:%.*]] = load double*, double** [[B]], align 8, !llvm.access.group !9
27515 // CHECK13-NEXT:    [[TMP45:%.*]] = load i32, i32* [[I61]], align 4, !llvm.access.group !9
27516 // CHECK13-NEXT:    [[IDXPROM67:%.*]] = sext i32 [[TMP45]] to i64
27517 // CHECK13-NEXT:    [[ARRAYIDX68:%.*]] = getelementptr inbounds double, double* [[TMP44]], i64 [[IDXPROM67]]
27518 // CHECK13-NEXT:    [[TMP46:%.*]] = load double, double* [[ARRAYIDX68]], align 8, !llvm.access.group !9
27519 // CHECK13-NEXT:    [[TMP47:%.*]] = load double*, double** [[C]], align 8, !llvm.access.group !9
27520 // CHECK13-NEXT:    [[TMP48:%.*]] = load i32, i32* [[I61]], align 4, !llvm.access.group !9
27521 // CHECK13-NEXT:    [[IDXPROM69:%.*]] = sext i32 [[TMP48]] to i64
27522 // CHECK13-NEXT:    [[ARRAYIDX70:%.*]] = getelementptr inbounds double, double* [[TMP47]], i64 [[IDXPROM69]]
27523 // CHECK13-NEXT:    [[TMP49:%.*]] = load double, double* [[ARRAYIDX70]], align 8, !llvm.access.group !9
27524 // CHECK13-NEXT:    [[ADD71:%.*]] = fadd double [[TMP46]], [[TMP49]]
27525 // CHECK13-NEXT:    [[TMP50:%.*]] = load double*, double** [[A]], align 8, !llvm.access.group !9
27526 // CHECK13-NEXT:    [[TMP51:%.*]] = load i32, i32* [[I61]], align 4, !llvm.access.group !9
27527 // CHECK13-NEXT:    [[IDXPROM72:%.*]] = sext i32 [[TMP51]] to i64
27528 // CHECK13-NEXT:    [[ARRAYIDX73:%.*]] = getelementptr inbounds double, double* [[TMP50]], i64 [[IDXPROM72]]
27529 // CHECK13-NEXT:    store double [[ADD71]], double* [[ARRAYIDX73]], align 8, !llvm.access.group !9
27530 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE74:%.*]]
27531 // CHECK13:       omp.body.continue74:
27532 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC75:%.*]]
27533 // CHECK13:       omp.inner.for.inc75:
27534 // CHECK13-NEXT:    [[TMP52:%.*]] = load i32, i32* [[DOTOMP_IV60]], align 4, !llvm.access.group !9
27535 // CHECK13-NEXT:    [[ADD76:%.*]] = add nsw i32 [[TMP52]], 1
27536 // CHECK13-NEXT:    store i32 [[ADD76]], i32* [[DOTOMP_IV60]], align 4, !llvm.access.group !9
27537 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND62]], !llvm.loop [[LOOP10:![0-9]+]]
27538 // CHECK13:       omp.inner.for.end77:
27539 // CHECK13-NEXT:    [[TMP53:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4
27540 // CHECK13-NEXT:    [[SUB78:%.*]] = sub nsw i32 [[TMP53]], 0
27541 // CHECK13-NEXT:    [[DIV79:%.*]] = sdiv i32 [[SUB78]], 1
27542 // CHECK13-NEXT:    [[MUL80:%.*]] = mul nsw i32 [[DIV79]], 1
27543 // CHECK13-NEXT:    [[ADD81:%.*]] = add nsw i32 0, [[MUL80]]
27544 // CHECK13-NEXT:    store i32 [[ADD81]], i32* [[I61]], align 4
27545 // CHECK13-NEXT:    br label [[SIMD_IF_END82]]
27546 // CHECK13:       simd.if.end82:
27547 // CHECK13-NEXT:    [[TMP54:%.*]] = load i32, i32* [[N]], align 4
27548 // CHECK13-NEXT:    store i32 [[TMP54]], i32* [[DOTCAPTURE_EXPR_84]], align 4
27549 // CHECK13-NEXT:    [[TMP55:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_84]], align 4
27550 // CHECK13-NEXT:    [[SUB86:%.*]] = sub nsw i32 [[TMP55]], 0
27551 // CHECK13-NEXT:    [[DIV87:%.*]] = sdiv i32 [[SUB86]], 1
27552 // CHECK13-NEXT:    [[SUB88:%.*]] = sub nsw i32 [[DIV87]], 1
27553 // CHECK13-NEXT:    store i32 [[SUB88]], i32* [[DOTCAPTURE_EXPR_85]], align 4
27554 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB89]], align 4
27555 // CHECK13-NEXT:    [[TMP56:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_85]], align 4
27556 // CHECK13-NEXT:    store i32 [[TMP56]], i32* [[DOTOMP_UB90]], align 4
27557 // CHECK13-NEXT:    store i32 0, i32* [[I91]], align 4
27558 // CHECK13-NEXT:    [[TMP57:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_84]], align 4
27559 // CHECK13-NEXT:    [[CMP92:%.*]] = icmp slt i32 0, [[TMP57]]
27560 // CHECK13-NEXT:    br i1 [[CMP92]], label [[SIMD_IF_THEN93:%.*]], label [[SIMD_IF_END116:%.*]]
27561 // CHECK13:       simd.if.then93:
27562 // CHECK13-NEXT:    [[TMP58:%.*]] = load i32, i32* [[DOTOMP_LB89]], align 4
27563 // CHECK13-NEXT:    store i32 [[TMP58]], i32* [[DOTOMP_IV94]], align 4
27564 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND96:%.*]]
27565 // CHECK13:       omp.inner.for.cond96:
27566 // CHECK13-NEXT:    [[TMP59:%.*]] = load i32, i32* [[DOTOMP_IV94]], align 4, !llvm.access.group !12
27567 // CHECK13-NEXT:    [[TMP60:%.*]] = load i32, i32* [[DOTOMP_UB90]], align 4, !llvm.access.group !12
27568 // CHECK13-NEXT:    [[CMP97:%.*]] = icmp sle i32 [[TMP59]], [[TMP60]]
27569 // CHECK13-NEXT:    br i1 [[CMP97]], label [[OMP_INNER_FOR_BODY98:%.*]], label [[OMP_INNER_FOR_END111:%.*]]
27570 // CHECK13:       omp.inner.for.body98:
27571 // CHECK13-NEXT:    [[TMP61:%.*]] = load i32, i32* [[DOTOMP_IV94]], align 4, !llvm.access.group !12
27572 // CHECK13-NEXT:    [[MUL99:%.*]] = mul nsw i32 [[TMP61]], 1
27573 // CHECK13-NEXT:    [[ADD100:%.*]] = add nsw i32 0, [[MUL99]]
27574 // CHECK13-NEXT:    store i32 [[ADD100]], i32* [[I95]], align 4, !llvm.access.group !12
27575 // CHECK13-NEXT:    [[TMP62:%.*]] = load double*, double** [[B]], align 8, !llvm.access.group !12
27576 // CHECK13-NEXT:    [[TMP63:%.*]] = load i32, i32* [[I95]], align 4, !llvm.access.group !12
27577 // CHECK13-NEXT:    [[IDXPROM101:%.*]] = sext i32 [[TMP63]] to i64
27578 // CHECK13-NEXT:    [[ARRAYIDX102:%.*]] = getelementptr inbounds double, double* [[TMP62]], i64 [[IDXPROM101]]
27579 // CHECK13-NEXT:    [[TMP64:%.*]] = load double, double* [[ARRAYIDX102]], align 8, !llvm.access.group !12
27580 // CHECK13-NEXT:    [[TMP65:%.*]] = load double*, double** [[C]], align 8, !llvm.access.group !12
27581 // CHECK13-NEXT:    [[TMP66:%.*]] = load i32, i32* [[I95]], align 4, !llvm.access.group !12
27582 // CHECK13-NEXT:    [[IDXPROM103:%.*]] = sext i32 [[TMP66]] to i64
27583 // CHECK13-NEXT:    [[ARRAYIDX104:%.*]] = getelementptr inbounds double, double* [[TMP65]], i64 [[IDXPROM103]]
27584 // CHECK13-NEXT:    [[TMP67:%.*]] = load double, double* [[ARRAYIDX104]], align 8, !llvm.access.group !12
27585 // CHECK13-NEXT:    [[ADD105:%.*]] = fadd double [[TMP64]], [[TMP67]]
27586 // CHECK13-NEXT:    [[TMP68:%.*]] = load double*, double** [[A]], align 8, !llvm.access.group !12
27587 // CHECK13-NEXT:    [[TMP69:%.*]] = load i32, i32* [[I95]], align 4, !llvm.access.group !12
27588 // CHECK13-NEXT:    [[IDXPROM106:%.*]] = sext i32 [[TMP69]] to i64
27589 // CHECK13-NEXT:    [[ARRAYIDX107:%.*]] = getelementptr inbounds double, double* [[TMP68]], i64 [[IDXPROM106]]
27590 // CHECK13-NEXT:    store double [[ADD105]], double* [[ARRAYIDX107]], align 8, !llvm.access.group !12
27591 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE108:%.*]]
27592 // CHECK13:       omp.body.continue108:
27593 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC109:%.*]]
27594 // CHECK13:       omp.inner.for.inc109:
27595 // CHECK13-NEXT:    [[TMP70:%.*]] = load i32, i32* [[DOTOMP_IV94]], align 4, !llvm.access.group !12
27596 // CHECK13-NEXT:    [[ADD110:%.*]] = add nsw i32 [[TMP70]], 1
27597 // CHECK13-NEXT:    store i32 [[ADD110]], i32* [[DOTOMP_IV94]], align 4, !llvm.access.group !12
27598 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND96]], !llvm.loop [[LOOP13:![0-9]+]]
27599 // CHECK13:       omp.inner.for.end111:
27600 // CHECK13-NEXT:    [[TMP71:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_84]], align 4
27601 // CHECK13-NEXT:    [[SUB112:%.*]] = sub nsw i32 [[TMP71]], 0
27602 // CHECK13-NEXT:    [[DIV113:%.*]] = sdiv i32 [[SUB112]], 1
27603 // CHECK13-NEXT:    [[MUL114:%.*]] = mul nsw i32 [[DIV113]], 1
27604 // CHECK13-NEXT:    [[ADD115:%.*]] = add nsw i32 0, [[MUL114]]
27605 // CHECK13-NEXT:    store i32 [[ADD115]], i32* [[I95]], align 4
27606 // CHECK13-NEXT:    br label [[SIMD_IF_END116]]
27607 // CHECK13:       simd.if.end116:
27608 // CHECK13-NEXT:    [[TMP72:%.*]] = load i32, i32* [[CH]], align 4
27609 // CHECK13-NEXT:    store i32 [[TMP72]], i32* [[DOTCAPTURE_EXPR_117]], align 4
27610 // CHECK13-NEXT:    [[TMP73:%.*]] = load i32, i32* [[N]], align 4
27611 // CHECK13-NEXT:    store i32 [[TMP73]], i32* [[DOTCAPTURE_EXPR_119]], align 4
27612 // CHECK13-NEXT:    [[TMP74:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_119]], align 4
27613 // CHECK13-NEXT:    [[SUB121:%.*]] = sub nsw i32 [[TMP74]], 0
27614 // CHECK13-NEXT:    [[DIV122:%.*]] = sdiv i32 [[SUB121]], 1
27615 // CHECK13-NEXT:    [[SUB123:%.*]] = sub nsw i32 [[DIV122]], 1
27616 // CHECK13-NEXT:    store i32 [[SUB123]], i32* [[DOTCAPTURE_EXPR_120]], align 4
27617 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB124]], align 4
27618 // CHECK13-NEXT:    [[TMP75:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_120]], align 4
27619 // CHECK13-NEXT:    store i32 [[TMP75]], i32* [[DOTOMP_UB125]], align 4
27620 // CHECK13-NEXT:    store i32 0, i32* [[I126]], align 4
27621 // CHECK13-NEXT:    [[TMP76:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_119]], align 4
27622 // CHECK13-NEXT:    [[CMP127:%.*]] = icmp slt i32 0, [[TMP76]]
27623 // CHECK13-NEXT:    br i1 [[CMP127]], label [[SIMD_IF_THEN128:%.*]], label [[SIMD_IF_END151:%.*]]
27624 // CHECK13:       simd.if.then128:
27625 // CHECK13-NEXT:    [[TMP77:%.*]] = load i32, i32* [[DOTOMP_LB124]], align 4
27626 // CHECK13-NEXT:    store i32 [[TMP77]], i32* [[DOTOMP_IV129]], align 4
27627 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND131:%.*]]
27628 // CHECK13:       omp.inner.for.cond131:
27629 // CHECK13-NEXT:    [[TMP78:%.*]] = load i32, i32* [[DOTOMP_IV129]], align 4, !llvm.access.group !15
27630 // CHECK13-NEXT:    [[TMP79:%.*]] = load i32, i32* [[DOTOMP_UB125]], align 4, !llvm.access.group !15
27631 // CHECK13-NEXT:    [[CMP132:%.*]] = icmp sle i32 [[TMP78]], [[TMP79]]
27632 // CHECK13-NEXT:    br i1 [[CMP132]], label [[OMP_INNER_FOR_BODY133:%.*]], label [[OMP_INNER_FOR_END146:%.*]]
27633 // CHECK13:       omp.inner.for.body133:
27634 // CHECK13-NEXT:    [[TMP80:%.*]] = load i32, i32* [[DOTOMP_IV129]], align 4, !llvm.access.group !15
27635 // CHECK13-NEXT:    [[MUL134:%.*]] = mul nsw i32 [[TMP80]], 1
27636 // CHECK13-NEXT:    [[ADD135:%.*]] = add nsw i32 0, [[MUL134]]
27637 // CHECK13-NEXT:    store i32 [[ADD135]], i32* [[I130]], align 4, !llvm.access.group !15
27638 // CHECK13-NEXT:    [[TMP81:%.*]] = load double*, double** [[B]], align 8, !llvm.access.group !15
27639 // CHECK13-NEXT:    [[TMP82:%.*]] = load i32, i32* [[I130]], align 4, !llvm.access.group !15
27640 // CHECK13-NEXT:    [[IDXPROM136:%.*]] = sext i32 [[TMP82]] to i64
27641 // CHECK13-NEXT:    [[ARRAYIDX137:%.*]] = getelementptr inbounds double, double* [[TMP81]], i64 [[IDXPROM136]]
27642 // CHECK13-NEXT:    [[TMP83:%.*]] = load double, double* [[ARRAYIDX137]], align 8, !llvm.access.group !15
27643 // CHECK13-NEXT:    [[TMP84:%.*]] = load double*, double** [[C]], align 8, !llvm.access.group !15
27644 // CHECK13-NEXT:    [[TMP85:%.*]] = load i32, i32* [[I130]], align 4, !llvm.access.group !15
27645 // CHECK13-NEXT:    [[IDXPROM138:%.*]] = sext i32 [[TMP85]] to i64
27646 // CHECK13-NEXT:    [[ARRAYIDX139:%.*]] = getelementptr inbounds double, double* [[TMP84]], i64 [[IDXPROM138]]
27647 // CHECK13-NEXT:    [[TMP86:%.*]] = load double, double* [[ARRAYIDX139]], align 8, !llvm.access.group !15
27648 // CHECK13-NEXT:    [[ADD140:%.*]] = fadd double [[TMP83]], [[TMP86]]
27649 // CHECK13-NEXT:    [[TMP87:%.*]] = load double*, double** [[A]], align 8, !llvm.access.group !15
27650 // CHECK13-NEXT:    [[TMP88:%.*]] = load i32, i32* [[I130]], align 4, !llvm.access.group !15
27651 // CHECK13-NEXT:    [[IDXPROM141:%.*]] = sext i32 [[TMP88]] to i64
27652 // CHECK13-NEXT:    [[ARRAYIDX142:%.*]] = getelementptr inbounds double, double* [[TMP87]], i64 [[IDXPROM141]]
27653 // CHECK13-NEXT:    store double [[ADD140]], double* [[ARRAYIDX142]], align 8, !llvm.access.group !15
27654 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE143:%.*]]
27655 // CHECK13:       omp.body.continue143:
27656 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC144:%.*]]
27657 // CHECK13:       omp.inner.for.inc144:
27658 // CHECK13-NEXT:    [[TMP89:%.*]] = load i32, i32* [[DOTOMP_IV129]], align 4, !llvm.access.group !15
27659 // CHECK13-NEXT:    [[ADD145:%.*]] = add nsw i32 [[TMP89]], 1
27660 // CHECK13-NEXT:    store i32 [[ADD145]], i32* [[DOTOMP_IV129]], align 4, !llvm.access.group !15
27661 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND131]], !llvm.loop [[LOOP16:![0-9]+]]
27662 // CHECK13:       omp.inner.for.end146:
27663 // CHECK13-NEXT:    [[TMP90:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_119]], align 4
27664 // CHECK13-NEXT:    [[SUB147:%.*]] = sub nsw i32 [[TMP90]], 0
27665 // CHECK13-NEXT:    [[DIV148:%.*]] = sdiv i32 [[SUB147]], 1
27666 // CHECK13-NEXT:    [[MUL149:%.*]] = mul nsw i32 [[DIV148]], 1
27667 // CHECK13-NEXT:    [[ADD150:%.*]] = add nsw i32 0, [[MUL149]]
27668 // CHECK13-NEXT:    store i32 [[ADD150]], i32* [[I130]], align 4
27669 // CHECK13-NEXT:    br label [[SIMD_IF_END151]]
27670 // CHECK13:       simd.if.end151:
27671 // CHECK13-NEXT:    [[TMP91:%.*]] = load i32, i32* [[N]], align 4
27672 // CHECK13-NEXT:    store i32 [[TMP91]], i32* [[DOTCAPTURE_EXPR_153]], align 4
27673 // CHECK13-NEXT:    [[TMP92:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_153]], align 4
27674 // CHECK13-NEXT:    [[SUB155:%.*]] = sub nsw i32 [[TMP92]], 0
27675 // CHECK13-NEXT:    [[DIV156:%.*]] = sdiv i32 [[SUB155]], 1
27676 // CHECK13-NEXT:    [[SUB157:%.*]] = sub nsw i32 [[DIV156]], 1
27677 // CHECK13-NEXT:    store i32 [[SUB157]], i32* [[DOTCAPTURE_EXPR_154]], align 4
27678 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB158]], align 4
27679 // CHECK13-NEXT:    [[TMP93:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_154]], align 4
27680 // CHECK13-NEXT:    store i32 [[TMP93]], i32* [[DOTOMP_UB159]], align 4
27681 // CHECK13-NEXT:    store i32 0, i32* [[I160]], align 4
27682 // CHECK13-NEXT:    [[TMP94:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_153]], align 4
27683 // CHECK13-NEXT:    [[CMP161:%.*]] = icmp slt i32 0, [[TMP94]]
27684 // CHECK13-NEXT:    br i1 [[CMP161]], label [[SIMD_IF_THEN162:%.*]], label [[SIMD_IF_END185:%.*]]
27685 // CHECK13:       simd.if.then162:
27686 // CHECK13-NEXT:    [[TMP95:%.*]] = load i32, i32* [[DOTOMP_LB158]], align 4
27687 // CHECK13-NEXT:    store i32 [[TMP95]], i32* [[DOTOMP_IV163]], align 4
27688 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND165:%.*]]
27689 // CHECK13:       omp.inner.for.cond165:
27690 // CHECK13-NEXT:    [[TMP96:%.*]] = load i32, i32* [[DOTOMP_IV163]], align 4, !llvm.access.group !18
27691 // CHECK13-NEXT:    [[TMP97:%.*]] = load i32, i32* [[DOTOMP_UB159]], align 4, !llvm.access.group !18
27692 // CHECK13-NEXT:    [[CMP166:%.*]] = icmp sle i32 [[TMP96]], [[TMP97]]
27693 // CHECK13-NEXT:    br i1 [[CMP166]], label [[OMP_INNER_FOR_BODY167:%.*]], label [[OMP_INNER_FOR_END180:%.*]]
27694 // CHECK13:       omp.inner.for.body167:
27695 // CHECK13-NEXT:    [[TMP98:%.*]] = load i32, i32* [[DOTOMP_IV163]], align 4, !llvm.access.group !18
27696 // CHECK13-NEXT:    [[MUL168:%.*]] = mul nsw i32 [[TMP98]], 1
27697 // CHECK13-NEXT:    [[ADD169:%.*]] = add nsw i32 0, [[MUL168]]
27698 // CHECK13-NEXT:    store i32 [[ADD169]], i32* [[I164]], align 4, !llvm.access.group !18
27699 // CHECK13-NEXT:    [[TMP99:%.*]] = load double*, double** [[B]], align 8, !llvm.access.group !18
27700 // CHECK13-NEXT:    [[TMP100:%.*]] = load i32, i32* [[I164]], align 4, !llvm.access.group !18
27701 // CHECK13-NEXT:    [[IDXPROM170:%.*]] = sext i32 [[TMP100]] to i64
27702 // CHECK13-NEXT:    [[ARRAYIDX171:%.*]] = getelementptr inbounds double, double* [[TMP99]], i64 [[IDXPROM170]]
27703 // CHECK13-NEXT:    [[TMP101:%.*]] = load double, double* [[ARRAYIDX171]], align 8, !llvm.access.group !18
27704 // CHECK13-NEXT:    [[TMP102:%.*]] = load double*, double** [[C]], align 8, !llvm.access.group !18
27705 // CHECK13-NEXT:    [[TMP103:%.*]] = load i32, i32* [[I164]], align 4, !llvm.access.group !18
27706 // CHECK13-NEXT:    [[IDXPROM172:%.*]] = sext i32 [[TMP103]] to i64
27707 // CHECK13-NEXT:    [[ARRAYIDX173:%.*]] = getelementptr inbounds double, double* [[TMP102]], i64 [[IDXPROM172]]
27708 // CHECK13-NEXT:    [[TMP104:%.*]] = load double, double* [[ARRAYIDX173]], align 8, !llvm.access.group !18
27709 // CHECK13-NEXT:    [[ADD174:%.*]] = fadd double [[TMP101]], [[TMP104]]
27710 // CHECK13-NEXT:    [[TMP105:%.*]] = load double*, double** [[A]], align 8, !llvm.access.group !18
27711 // CHECK13-NEXT:    [[TMP106:%.*]] = load i32, i32* [[I164]], align 4, !llvm.access.group !18
27712 // CHECK13-NEXT:    [[IDXPROM175:%.*]] = sext i32 [[TMP106]] to i64
27713 // CHECK13-NEXT:    [[ARRAYIDX176:%.*]] = getelementptr inbounds double, double* [[TMP105]], i64 [[IDXPROM175]]
27714 // CHECK13-NEXT:    store double [[ADD174]], double* [[ARRAYIDX176]], align 8, !llvm.access.group !18
27715 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE177:%.*]]
27716 // CHECK13:       omp.body.continue177:
27717 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC178:%.*]]
27718 // CHECK13:       omp.inner.for.inc178:
27719 // CHECK13-NEXT:    [[TMP107:%.*]] = load i32, i32* [[DOTOMP_IV163]], align 4, !llvm.access.group !18
27720 // CHECK13-NEXT:    [[ADD179:%.*]] = add nsw i32 [[TMP107]], 1
27721 // CHECK13-NEXT:    store i32 [[ADD179]], i32* [[DOTOMP_IV163]], align 4, !llvm.access.group !18
27722 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND165]], !llvm.loop [[LOOP19:![0-9]+]]
27723 // CHECK13:       omp.inner.for.end180:
27724 // CHECK13-NEXT:    [[TMP108:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_153]], align 4
27725 // CHECK13-NEXT:    [[SUB181:%.*]] = sub nsw i32 [[TMP108]], 0
27726 // CHECK13-NEXT:    [[DIV182:%.*]] = sdiv i32 [[SUB181]], 1
27727 // CHECK13-NEXT:    [[MUL183:%.*]] = mul nsw i32 [[DIV182]], 1
27728 // CHECK13-NEXT:    [[ADD184:%.*]] = add nsw i32 0, [[MUL183]]
27729 // CHECK13-NEXT:    store i32 [[ADD184]], i32* [[I164]], align 4
27730 // CHECK13-NEXT:    br label [[SIMD_IF_END185]]
27731 // CHECK13:       simd.if.end185:
27732 // CHECK13-NEXT:    [[TMP109:%.*]] = load i32, i32* [[CH]], align 4
27733 // CHECK13-NEXT:    store i32 [[TMP109]], i32* [[DOTCAPTURE_EXPR_186]], align 4
27734 // CHECK13-NEXT:    [[TMP110:%.*]] = load i32, i32* [[N]], align 4
27735 // CHECK13-NEXT:    store i32 [[TMP110]], i32* [[DOTCAPTURE_EXPR_188]], align 4
27736 // CHECK13-NEXT:    [[TMP111:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_188]], align 4
27737 // CHECK13-NEXT:    [[SUB190:%.*]] = sub nsw i32 [[TMP111]], 0
27738 // CHECK13-NEXT:    [[DIV191:%.*]] = sdiv i32 [[SUB190]], 1
27739 // CHECK13-NEXT:    [[SUB192:%.*]] = sub nsw i32 [[DIV191]], 1
27740 // CHECK13-NEXT:    store i32 [[SUB192]], i32* [[DOTCAPTURE_EXPR_189]], align 4
27741 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB193]], align 4
27742 // CHECK13-NEXT:    [[TMP112:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_189]], align 4
27743 // CHECK13-NEXT:    store i32 [[TMP112]], i32* [[DOTOMP_UB194]], align 4
27744 // CHECK13-NEXT:    store i32 0, i32* [[I195]], align 4
27745 // CHECK13-NEXT:    [[TMP113:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_188]], align 4
27746 // CHECK13-NEXT:    [[CMP196:%.*]] = icmp slt i32 0, [[TMP113]]
27747 // CHECK13-NEXT:    br i1 [[CMP196]], label [[SIMD_IF_THEN197:%.*]], label [[SIMD_IF_END220:%.*]]
27748 // CHECK13:       simd.if.then197:
27749 // CHECK13-NEXT:    [[TMP114:%.*]] = load i32, i32* [[DOTOMP_LB193]], align 4
27750 // CHECK13-NEXT:    store i32 [[TMP114]], i32* [[DOTOMP_IV198]], align 4
27751 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND200:%.*]]
27752 // CHECK13:       omp.inner.for.cond200:
27753 // CHECK13-NEXT:    [[TMP115:%.*]] = load i32, i32* [[DOTOMP_IV198]], align 4, !llvm.access.group !21
27754 // CHECK13-NEXT:    [[TMP116:%.*]] = load i32, i32* [[DOTOMP_UB194]], align 4, !llvm.access.group !21
27755 // CHECK13-NEXT:    [[CMP201:%.*]] = icmp sle i32 [[TMP115]], [[TMP116]]
27756 // CHECK13-NEXT:    br i1 [[CMP201]], label [[OMP_INNER_FOR_BODY202:%.*]], label [[OMP_INNER_FOR_END215:%.*]]
27757 // CHECK13:       omp.inner.for.body202:
27758 // CHECK13-NEXT:    [[TMP117:%.*]] = load i32, i32* [[DOTOMP_IV198]], align 4, !llvm.access.group !21
27759 // CHECK13-NEXT:    [[MUL203:%.*]] = mul nsw i32 [[TMP117]], 1
27760 // CHECK13-NEXT:    [[ADD204:%.*]] = add nsw i32 0, [[MUL203]]
27761 // CHECK13-NEXT:    store i32 [[ADD204]], i32* [[I199]], align 4, !llvm.access.group !21
27762 // CHECK13-NEXT:    [[TMP118:%.*]] = load double*, double** [[B]], align 8, !llvm.access.group !21
27763 // CHECK13-NEXT:    [[TMP119:%.*]] = load i32, i32* [[I199]], align 4, !llvm.access.group !21
27764 // CHECK13-NEXT:    [[IDXPROM205:%.*]] = sext i32 [[TMP119]] to i64
27765 // CHECK13-NEXT:    [[ARRAYIDX206:%.*]] = getelementptr inbounds double, double* [[TMP118]], i64 [[IDXPROM205]]
27766 // CHECK13-NEXT:    [[TMP120:%.*]] = load double, double* [[ARRAYIDX206]], align 8, !llvm.access.group !21
27767 // CHECK13-NEXT:    [[TMP121:%.*]] = load double*, double** [[C]], align 8, !llvm.access.group !21
27768 // CHECK13-NEXT:    [[TMP122:%.*]] = load i32, i32* [[I199]], align 4, !llvm.access.group !21
27769 // CHECK13-NEXT:    [[IDXPROM207:%.*]] = sext i32 [[TMP122]] to i64
27770 // CHECK13-NEXT:    [[ARRAYIDX208:%.*]] = getelementptr inbounds double, double* [[TMP121]], i64 [[IDXPROM207]]
27771 // CHECK13-NEXT:    [[TMP123:%.*]] = load double, double* [[ARRAYIDX208]], align 8, !llvm.access.group !21
27772 // CHECK13-NEXT:    [[ADD209:%.*]] = fadd double [[TMP120]], [[TMP123]]
27773 // CHECK13-NEXT:    [[TMP124:%.*]] = load double*, double** [[A]], align 8, !llvm.access.group !21
27774 // CHECK13-NEXT:    [[TMP125:%.*]] = load i32, i32* [[I199]], align 4, !llvm.access.group !21
27775 // CHECK13-NEXT:    [[IDXPROM210:%.*]] = sext i32 [[TMP125]] to i64
27776 // CHECK13-NEXT:    [[ARRAYIDX211:%.*]] = getelementptr inbounds double, double* [[TMP124]], i64 [[IDXPROM210]]
27777 // CHECK13-NEXT:    store double [[ADD209]], double* [[ARRAYIDX211]], align 8, !llvm.access.group !21
27778 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE212:%.*]]
27779 // CHECK13:       omp.body.continue212:
27780 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC213:%.*]]
27781 // CHECK13:       omp.inner.for.inc213:
27782 // CHECK13-NEXT:    [[TMP126:%.*]] = load i32, i32* [[DOTOMP_IV198]], align 4, !llvm.access.group !21
27783 // CHECK13-NEXT:    [[ADD214:%.*]] = add nsw i32 [[TMP126]], 1
27784 // CHECK13-NEXT:    store i32 [[ADD214]], i32* [[DOTOMP_IV198]], align 4, !llvm.access.group !21
27785 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND200]], !llvm.loop [[LOOP22:![0-9]+]]
27786 // CHECK13:       omp.inner.for.end215:
27787 // CHECK13-NEXT:    [[TMP127:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_188]], align 4
27788 // CHECK13-NEXT:    [[SUB216:%.*]] = sub nsw i32 [[TMP127]], 0
27789 // CHECK13-NEXT:    [[DIV217:%.*]] = sdiv i32 [[SUB216]], 1
27790 // CHECK13-NEXT:    [[MUL218:%.*]] = mul nsw i32 [[DIV217]], 1
27791 // CHECK13-NEXT:    [[ADD219:%.*]] = add nsw i32 0, [[MUL218]]
27792 // CHECK13-NEXT:    store i32 [[ADD219]], i32* [[I199]], align 4
27793 // CHECK13-NEXT:    br label [[SIMD_IF_END220]]
27794 // CHECK13:       simd.if.end220:
27795 // CHECK13-NEXT:    [[CALL:%.*]] = call signext i32 @_Z5tmainIiET_v()
27796 // CHECK13-NEXT:    ret i32 [[CALL]]
27797 //
27798 //
27799 // CHECK13-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
27800 // CHECK13-SAME: () #[[ATTR1:[0-9]+]] comdat {
27801 // CHECK13-NEXT:  entry:
27802 // CHECK13-NEXT:    [[A:%.*]] = alloca i32*, align 8
27803 // CHECK13-NEXT:    [[B:%.*]] = alloca i32*, align 8
27804 // CHECK13-NEXT:    [[C:%.*]] = alloca i32*, align 8
27805 // CHECK13-NEXT:    [[N:%.*]] = alloca i32, align 4
27806 // CHECK13-NEXT:    [[CH:%.*]] = alloca i32, align 4
27807 // CHECK13-NEXT:    [[TMP:%.*]] = alloca i32, align 4
27808 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
27809 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
27810 // CHECK13-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
27811 // CHECK13-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
27812 // CHECK13-NEXT:    [[I:%.*]] = alloca i32, align 4
27813 // CHECK13-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
27814 // CHECK13-NEXT:    [[I3:%.*]] = alloca i32, align 4
27815 // CHECK13-NEXT:    [[_TMP15:%.*]] = alloca i32, align 4
27816 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_16:%.*]] = alloca i32, align 4
27817 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4
27818 // CHECK13-NEXT:    [[DOTOMP_LB21:%.*]] = alloca i32, align 4
27819 // CHECK13-NEXT:    [[DOTOMP_UB22:%.*]] = alloca i32, align 4
27820 // CHECK13-NEXT:    [[I23:%.*]] = alloca i32, align 4
27821 // CHECK13-NEXT:    [[DOTOMP_IV26:%.*]] = alloca i32, align 4
27822 // CHECK13-NEXT:    [[I27:%.*]] = alloca i32, align 4
27823 // CHECK13-NEXT:    [[_TMP49:%.*]] = alloca i32, align 4
27824 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_50:%.*]] = alloca i32, align 4
27825 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_51:%.*]] = alloca i32, align 4
27826 // CHECK13-NEXT:    [[DOTOMP_LB55:%.*]] = alloca i32, align 4
27827 // CHECK13-NEXT:    [[DOTOMP_UB56:%.*]] = alloca i32, align 4
27828 // CHECK13-NEXT:    [[I57:%.*]] = alloca i32, align 4
27829 // CHECK13-NEXT:    [[DOTOMP_IV60:%.*]] = alloca i32, align 4
27830 // CHECK13-NEXT:    [[I61:%.*]] = alloca i32, align 4
27831 // CHECK13-NEXT:    [[_TMP83:%.*]] = alloca i32, align 4
27832 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_84:%.*]] = alloca i32, align 4
27833 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_85:%.*]] = alloca i32, align 4
27834 // CHECK13-NEXT:    [[DOTOMP_LB89:%.*]] = alloca i32, align 4
27835 // CHECK13-NEXT:    [[DOTOMP_UB90:%.*]] = alloca i32, align 4
27836 // CHECK13-NEXT:    [[I91:%.*]] = alloca i32, align 4
27837 // CHECK13-NEXT:    [[DOTOMP_IV94:%.*]] = alloca i32, align 4
27838 // CHECK13-NEXT:    [[I95:%.*]] = alloca i32, align 4
27839 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_117:%.*]] = alloca i32, align 4
27840 // CHECK13-NEXT:    [[_TMP118:%.*]] = alloca i32, align 4
27841 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_119:%.*]] = alloca i32, align 4
27842 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_120:%.*]] = alloca i32, align 4
27843 // CHECK13-NEXT:    [[DOTOMP_LB124:%.*]] = alloca i32, align 4
27844 // CHECK13-NEXT:    [[DOTOMP_UB125:%.*]] = alloca i32, align 4
27845 // CHECK13-NEXT:    [[I126:%.*]] = alloca i32, align 4
27846 // CHECK13-NEXT:    [[DOTOMP_IV129:%.*]] = alloca i32, align 4
27847 // CHECK13-NEXT:    [[I130:%.*]] = alloca i32, align 4
27848 // CHECK13-NEXT:    [[_TMP152:%.*]] = alloca i32, align 4
27849 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_153:%.*]] = alloca i32, align 4
27850 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_154:%.*]] = alloca i32, align 4
27851 // CHECK13-NEXT:    [[DOTOMP_LB158:%.*]] = alloca i32, align 4
27852 // CHECK13-NEXT:    [[DOTOMP_UB159:%.*]] = alloca i32, align 4
27853 // CHECK13-NEXT:    [[I160:%.*]] = alloca i32, align 4
27854 // CHECK13-NEXT:    [[DOTOMP_IV163:%.*]] = alloca i32, align 4
27855 // CHECK13-NEXT:    [[I164:%.*]] = alloca i32, align 4
27856 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_186:%.*]] = alloca i32, align 4
27857 // CHECK13-NEXT:    [[_TMP187:%.*]] = alloca i32, align 4
27858 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_188:%.*]] = alloca i32, align 4
27859 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_189:%.*]] = alloca i32, align 4
27860 // CHECK13-NEXT:    [[DOTOMP_LB193:%.*]] = alloca i32, align 4
27861 // CHECK13-NEXT:    [[DOTOMP_UB194:%.*]] = alloca i32, align 4
27862 // CHECK13-NEXT:    [[I195:%.*]] = alloca i32, align 4
27863 // CHECK13-NEXT:    [[DOTOMP_IV198:%.*]] = alloca i32, align 4
27864 // CHECK13-NEXT:    [[I199:%.*]] = alloca i32, align 4
27865 // CHECK13-NEXT:    store i32 10000, i32* [[N]], align 4
27866 // CHECK13-NEXT:    store i32 100, i32* [[CH]], align 4
27867 // CHECK13-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
27868 // CHECK13-NEXT:    store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
27869 // CHECK13-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
27870 // CHECK13-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP1]], 0
27871 // CHECK13-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
27872 // CHECK13-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
27873 // CHECK13-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
27874 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
27875 // CHECK13-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
27876 // CHECK13-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
27877 // CHECK13-NEXT:    store i32 0, i32* [[I]], align 4
27878 // CHECK13-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
27879 // CHECK13-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
27880 // CHECK13-NEXT:    br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
27881 // CHECK13:       simd.if.then:
27882 // CHECK13-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
27883 // CHECK13-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
27884 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
27885 // CHECK13:       omp.inner.for.cond:
27886 // CHECK13-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
27887 // CHECK13-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !24
27888 // CHECK13-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
27889 // CHECK13-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
27890 // CHECK13:       omp.inner.for.body:
27891 // CHECK13-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
27892 // CHECK13-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
27893 // CHECK13-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
27894 // CHECK13-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !24
27895 // CHECK13-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[B]], align 8, !llvm.access.group !24
27896 // CHECK13-NEXT:    [[TMP9:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !24
27897 // CHECK13-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP9]] to i64
27898 // CHECK13-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP8]], i64 [[IDXPROM]]
27899 // CHECK13-NEXT:    [[TMP10:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !24
27900 // CHECK13-NEXT:    [[TMP11:%.*]] = load i32*, i32** [[C]], align 8, !llvm.access.group !24
27901 // CHECK13-NEXT:    [[TMP12:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !24
27902 // CHECK13-NEXT:    [[IDXPROM5:%.*]] = sext i32 [[TMP12]] to i64
27903 // CHECK13-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[TMP11]], i64 [[IDXPROM5]]
27904 // CHECK13-NEXT:    [[TMP13:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4, !llvm.access.group !24
27905 // CHECK13-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP10]], [[TMP13]]
27906 // CHECK13-NEXT:    [[TMP14:%.*]] = load i32*, i32** [[A]], align 8, !llvm.access.group !24
27907 // CHECK13-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !24
27908 // CHECK13-NEXT:    [[IDXPROM8:%.*]] = sext i32 [[TMP15]] to i64
27909 // CHECK13-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, i32* [[TMP14]], i64 [[IDXPROM8]]
27910 // CHECK13-NEXT:    store i32 [[ADD7]], i32* [[ARRAYIDX9]], align 4, !llvm.access.group !24
27911 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
27912 // CHECK13:       omp.body.continue:
27913 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
27914 // CHECK13:       omp.inner.for.inc:
27915 // CHECK13-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
27916 // CHECK13-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP16]], 1
27917 // CHECK13-NEXT:    store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
27918 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
27919 // CHECK13:       omp.inner.for.end:
27920 // CHECK13-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
27921 // CHECK13-NEXT:    [[SUB11:%.*]] = sub nsw i32 [[TMP17]], 0
27922 // CHECK13-NEXT:    [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1
27923 // CHECK13-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[DIV12]], 1
27924 // CHECK13-NEXT:    [[ADD14:%.*]] = add nsw i32 0, [[MUL13]]
27925 // CHECK13-NEXT:    store i32 [[ADD14]], i32* [[I3]], align 4
27926 // CHECK13-NEXT:    br label [[SIMD_IF_END]]
27927 // CHECK13:       simd.if.end:
27928 // CHECK13-NEXT:    [[TMP18:%.*]] = load i32, i32* [[N]], align 4
27929 // CHECK13-NEXT:    store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR_16]], align 4
27930 // CHECK13-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4
27931 // CHECK13-NEXT:    [[SUB18:%.*]] = sub nsw i32 [[TMP19]], 0
27932 // CHECK13-NEXT:    [[DIV19:%.*]] = sdiv i32 [[SUB18]], 1
27933 // CHECK13-NEXT:    [[SUB20:%.*]] = sub nsw i32 [[DIV19]], 1
27934 // CHECK13-NEXT:    store i32 [[SUB20]], i32* [[DOTCAPTURE_EXPR_17]], align 4
27935 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB21]], align 4
27936 // CHECK13-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4
27937 // CHECK13-NEXT:    store i32 [[TMP20]], i32* [[DOTOMP_UB22]], align 4
27938 // CHECK13-NEXT:    store i32 0, i32* [[I23]], align 4
27939 // CHECK13-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4
27940 // CHECK13-NEXT:    [[CMP24:%.*]] = icmp slt i32 0, [[TMP21]]
27941 // CHECK13-NEXT:    br i1 [[CMP24]], label [[SIMD_IF_THEN25:%.*]], label [[SIMD_IF_END48:%.*]]
27942 // CHECK13:       simd.if.then25:
27943 // CHECK13-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_LB21]], align 4
27944 // CHECK13-NEXT:    store i32 [[TMP22]], i32* [[DOTOMP_IV26]], align 4
27945 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND28:%.*]]
27946 // CHECK13:       omp.inner.for.cond28:
27947 // CHECK13-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV26]], align 4, !llvm.access.group !27
27948 // CHECK13-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_UB22]], align 4, !llvm.access.group !27
27949 // CHECK13-NEXT:    [[CMP29:%.*]] = icmp sle i32 [[TMP23]], [[TMP24]]
27950 // CHECK13-NEXT:    br i1 [[CMP29]], label [[OMP_INNER_FOR_BODY30:%.*]], label [[OMP_INNER_FOR_END43:%.*]]
27951 // CHECK13:       omp.inner.for.body30:
27952 // CHECK13-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV26]], align 4, !llvm.access.group !27
27953 // CHECK13-NEXT:    [[MUL31:%.*]] = mul nsw i32 [[TMP25]], 1
27954 // CHECK13-NEXT:    [[ADD32:%.*]] = add nsw i32 0, [[MUL31]]
27955 // CHECK13-NEXT:    store i32 [[ADD32]], i32* [[I27]], align 4, !llvm.access.group !27
27956 // CHECK13-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[B]], align 8, !llvm.access.group !27
27957 // CHECK13-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I27]], align 4, !llvm.access.group !27
27958 // CHECK13-NEXT:    [[IDXPROM33:%.*]] = sext i32 [[TMP27]] to i64
27959 // CHECK13-NEXT:    [[ARRAYIDX34:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i64 [[IDXPROM33]]
27960 // CHECK13-NEXT:    [[TMP28:%.*]] = load i32, i32* [[ARRAYIDX34]], align 4, !llvm.access.group !27
27961 // CHECK13-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[C]], align 8, !llvm.access.group !27
27962 // CHECK13-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I27]], align 4, !llvm.access.group !27
27963 // CHECK13-NEXT:    [[IDXPROM35:%.*]] = sext i32 [[TMP30]] to i64
27964 // CHECK13-NEXT:    [[ARRAYIDX36:%.*]] = getelementptr inbounds i32, i32* [[TMP29]], i64 [[IDXPROM35]]
27965 // CHECK13-NEXT:    [[TMP31:%.*]] = load i32, i32* [[ARRAYIDX36]], align 4, !llvm.access.group !27
27966 // CHECK13-NEXT:    [[ADD37:%.*]] = add nsw i32 [[TMP28]], [[TMP31]]
27967 // CHECK13-NEXT:    [[TMP32:%.*]] = load i32*, i32** [[A]], align 8, !llvm.access.group !27
27968 // CHECK13-NEXT:    [[TMP33:%.*]] = load i32, i32* [[I27]], align 4, !llvm.access.group !27
27969 // CHECK13-NEXT:    [[IDXPROM38:%.*]] = sext i32 [[TMP33]] to i64
27970 // CHECK13-NEXT:    [[ARRAYIDX39:%.*]] = getelementptr inbounds i32, i32* [[TMP32]], i64 [[IDXPROM38]]
27971 // CHECK13-NEXT:    store i32 [[ADD37]], i32* [[ARRAYIDX39]], align 4, !llvm.access.group !27
27972 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE40:%.*]]
27973 // CHECK13:       omp.body.continue40:
27974 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC41:%.*]]
27975 // CHECK13:       omp.inner.for.inc41:
27976 // CHECK13-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV26]], align 4, !llvm.access.group !27
27977 // CHECK13-NEXT:    [[ADD42:%.*]] = add nsw i32 [[TMP34]], 1
27978 // CHECK13-NEXT:    store i32 [[ADD42]], i32* [[DOTOMP_IV26]], align 4, !llvm.access.group !27
27979 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND28]], !llvm.loop [[LOOP28:![0-9]+]]
27980 // CHECK13:       omp.inner.for.end43:
27981 // CHECK13-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4
27982 // CHECK13-NEXT:    [[SUB44:%.*]] = sub nsw i32 [[TMP35]], 0
27983 // CHECK13-NEXT:    [[DIV45:%.*]] = sdiv i32 [[SUB44]], 1
27984 // CHECK13-NEXT:    [[MUL46:%.*]] = mul nsw i32 [[DIV45]], 1
27985 // CHECK13-NEXT:    [[ADD47:%.*]] = add nsw i32 0, [[MUL46]]
27986 // CHECK13-NEXT:    store i32 [[ADD47]], i32* [[I27]], align 4
27987 // CHECK13-NEXT:    br label [[SIMD_IF_END48]]
27988 // CHECK13:       simd.if.end48:
27989 // CHECK13-NEXT:    [[TMP36:%.*]] = load i32, i32* [[N]], align 4
27990 // CHECK13-NEXT:    store i32 [[TMP36]], i32* [[DOTCAPTURE_EXPR_50]], align 4
27991 // CHECK13-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4
27992 // CHECK13-NEXT:    [[SUB52:%.*]] = sub nsw i32 [[TMP37]], 0
27993 // CHECK13-NEXT:    [[DIV53:%.*]] = sdiv i32 [[SUB52]], 1
27994 // CHECK13-NEXT:    [[SUB54:%.*]] = sub nsw i32 [[DIV53]], 1
27995 // CHECK13-NEXT:    store i32 [[SUB54]], i32* [[DOTCAPTURE_EXPR_51]], align 4
27996 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB55]], align 4
27997 // CHECK13-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_51]], align 4
27998 // CHECK13-NEXT:    store i32 [[TMP38]], i32* [[DOTOMP_UB56]], align 4
27999 // CHECK13-NEXT:    store i32 0, i32* [[I57]], align 4
28000 // CHECK13-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4
28001 // CHECK13-NEXT:    [[CMP58:%.*]] = icmp slt i32 0, [[TMP39]]
28002 // CHECK13-NEXT:    br i1 [[CMP58]], label [[SIMD_IF_THEN59:%.*]], label [[SIMD_IF_END82:%.*]]
28003 // CHECK13:       simd.if.then59:
28004 // CHECK13-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_LB55]], align 4
28005 // CHECK13-NEXT:    store i32 [[TMP40]], i32* [[DOTOMP_IV60]], align 4
28006 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND62:%.*]]
28007 // CHECK13:       omp.inner.for.cond62:
28008 // CHECK13-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_IV60]], align 4, !llvm.access.group !30
28009 // CHECK13-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_UB56]], align 4, !llvm.access.group !30
28010 // CHECK13-NEXT:    [[CMP63:%.*]] = icmp sle i32 [[TMP41]], [[TMP42]]
28011 // CHECK13-NEXT:    br i1 [[CMP63]], label [[OMP_INNER_FOR_BODY64:%.*]], label [[OMP_INNER_FOR_END77:%.*]]
28012 // CHECK13:       omp.inner.for.body64:
28013 // CHECK13-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV60]], align 4, !llvm.access.group !30
28014 // CHECK13-NEXT:    [[MUL65:%.*]] = mul nsw i32 [[TMP43]], 1
28015 // CHECK13-NEXT:    [[ADD66:%.*]] = add nsw i32 0, [[MUL65]]
28016 // CHECK13-NEXT:    store i32 [[ADD66]], i32* [[I61]], align 4, !llvm.access.group !30
28017 // CHECK13-NEXT:    [[TMP44:%.*]] = load i32*, i32** [[B]], align 8, !llvm.access.group !30
28018 // CHECK13-NEXT:    [[TMP45:%.*]] = load i32, i32* [[I61]], align 4, !llvm.access.group !30
28019 // CHECK13-NEXT:    [[IDXPROM67:%.*]] = sext i32 [[TMP45]] to i64
28020 // CHECK13-NEXT:    [[ARRAYIDX68:%.*]] = getelementptr inbounds i32, i32* [[TMP44]], i64 [[IDXPROM67]]
28021 // CHECK13-NEXT:    [[TMP46:%.*]] = load i32, i32* [[ARRAYIDX68]], align 4, !llvm.access.group !30
28022 // CHECK13-NEXT:    [[TMP47:%.*]] = load i32*, i32** [[C]], align 8, !llvm.access.group !30
28023 // CHECK13-NEXT:    [[TMP48:%.*]] = load i32, i32* [[I61]], align 4, !llvm.access.group !30
28024 // CHECK13-NEXT:    [[IDXPROM69:%.*]] = sext i32 [[TMP48]] to i64
28025 // CHECK13-NEXT:    [[ARRAYIDX70:%.*]] = getelementptr inbounds i32, i32* [[TMP47]], i64 [[IDXPROM69]]
28026 // CHECK13-NEXT:    [[TMP49:%.*]] = load i32, i32* [[ARRAYIDX70]], align 4, !llvm.access.group !30
28027 // CHECK13-NEXT:    [[ADD71:%.*]] = add nsw i32 [[TMP46]], [[TMP49]]
28028 // CHECK13-NEXT:    [[TMP50:%.*]] = load i32*, i32** [[A]], align 8, !llvm.access.group !30
28029 // CHECK13-NEXT:    [[TMP51:%.*]] = load i32, i32* [[I61]], align 4, !llvm.access.group !30
28030 // CHECK13-NEXT:    [[IDXPROM72:%.*]] = sext i32 [[TMP51]] to i64
28031 // CHECK13-NEXT:    [[ARRAYIDX73:%.*]] = getelementptr inbounds i32, i32* [[TMP50]], i64 [[IDXPROM72]]
28032 // CHECK13-NEXT:    store i32 [[ADD71]], i32* [[ARRAYIDX73]], align 4, !llvm.access.group !30
28033 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE74:%.*]]
28034 // CHECK13:       omp.body.continue74:
28035 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC75:%.*]]
28036 // CHECK13:       omp.inner.for.inc75:
28037 // CHECK13-NEXT:    [[TMP52:%.*]] = load i32, i32* [[DOTOMP_IV60]], align 4, !llvm.access.group !30
28038 // CHECK13-NEXT:    [[ADD76:%.*]] = add nsw i32 [[TMP52]], 1
28039 // CHECK13-NEXT:    store i32 [[ADD76]], i32* [[DOTOMP_IV60]], align 4, !llvm.access.group !30
28040 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND62]], !llvm.loop [[LOOP31:![0-9]+]]
28041 // CHECK13:       omp.inner.for.end77:
28042 // CHECK13-NEXT:    [[TMP53:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4
28043 // CHECK13-NEXT:    [[SUB78:%.*]] = sub nsw i32 [[TMP53]], 0
28044 // CHECK13-NEXT:    [[DIV79:%.*]] = sdiv i32 [[SUB78]], 1
28045 // CHECK13-NEXT:    [[MUL80:%.*]] = mul nsw i32 [[DIV79]], 1
28046 // CHECK13-NEXT:    [[ADD81:%.*]] = add nsw i32 0, [[MUL80]]
28047 // CHECK13-NEXT:    store i32 [[ADD81]], i32* [[I61]], align 4
28048 // CHECK13-NEXT:    br label [[SIMD_IF_END82]]
28049 // CHECK13:       simd.if.end82:
28050 // CHECK13-NEXT:    [[TMP54:%.*]] = load i32, i32* [[N]], align 4
28051 // CHECK13-NEXT:    store i32 [[TMP54]], i32* [[DOTCAPTURE_EXPR_84]], align 4
28052 // CHECK13-NEXT:    [[TMP55:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_84]], align 4
28053 // CHECK13-NEXT:    [[SUB86:%.*]] = sub nsw i32 [[TMP55]], 0
28054 // CHECK13-NEXT:    [[DIV87:%.*]] = sdiv i32 [[SUB86]], 1
28055 // CHECK13-NEXT:    [[SUB88:%.*]] = sub nsw i32 [[DIV87]], 1
28056 // CHECK13-NEXT:    store i32 [[SUB88]], i32* [[DOTCAPTURE_EXPR_85]], align 4
28057 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB89]], align 4
28058 // CHECK13-NEXT:    [[TMP56:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_85]], align 4
28059 // CHECK13-NEXT:    store i32 [[TMP56]], i32* [[DOTOMP_UB90]], align 4
28060 // CHECK13-NEXT:    store i32 0, i32* [[I91]], align 4
28061 // CHECK13-NEXT:    [[TMP57:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_84]], align 4
28062 // CHECK13-NEXT:    [[CMP92:%.*]] = icmp slt i32 0, [[TMP57]]
28063 // CHECK13-NEXT:    br i1 [[CMP92]], label [[SIMD_IF_THEN93:%.*]], label [[SIMD_IF_END116:%.*]]
28064 // CHECK13:       simd.if.then93:
28065 // CHECK13-NEXT:    [[TMP58:%.*]] = load i32, i32* [[DOTOMP_LB89]], align 4
28066 // CHECK13-NEXT:    store i32 [[TMP58]], i32* [[DOTOMP_IV94]], align 4
28067 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND96:%.*]]
28068 // CHECK13:       omp.inner.for.cond96:
28069 // CHECK13-NEXT:    [[TMP59:%.*]] = load i32, i32* [[DOTOMP_IV94]], align 4, !llvm.access.group !33
28070 // CHECK13-NEXT:    [[TMP60:%.*]] = load i32, i32* [[DOTOMP_UB90]], align 4, !llvm.access.group !33
28071 // CHECK13-NEXT:    [[CMP97:%.*]] = icmp sle i32 [[TMP59]], [[TMP60]]
28072 // CHECK13-NEXT:    br i1 [[CMP97]], label [[OMP_INNER_FOR_BODY98:%.*]], label [[OMP_INNER_FOR_END111:%.*]]
28073 // CHECK13:       omp.inner.for.body98:
28074 // CHECK13-NEXT:    [[TMP61:%.*]] = load i32, i32* [[DOTOMP_IV94]], align 4, !llvm.access.group !33
28075 // CHECK13-NEXT:    [[MUL99:%.*]] = mul nsw i32 [[TMP61]], 1
28076 // CHECK13-NEXT:    [[ADD100:%.*]] = add nsw i32 0, [[MUL99]]
28077 // CHECK13-NEXT:    store i32 [[ADD100]], i32* [[I95]], align 4, !llvm.access.group !33
28078 // CHECK13-NEXT:    [[TMP62:%.*]] = load i32*, i32** [[B]], align 8, !llvm.access.group !33
28079 // CHECK13-NEXT:    [[TMP63:%.*]] = load i32, i32* [[I95]], align 4, !llvm.access.group !33
28080 // CHECK13-NEXT:    [[IDXPROM101:%.*]] = sext i32 [[TMP63]] to i64
28081 // CHECK13-NEXT:    [[ARRAYIDX102:%.*]] = getelementptr inbounds i32, i32* [[TMP62]], i64 [[IDXPROM101]]
28082 // CHECK13-NEXT:    [[TMP64:%.*]] = load i32, i32* [[ARRAYIDX102]], align 4, !llvm.access.group !33
28083 // CHECK13-NEXT:    [[TMP65:%.*]] = load i32*, i32** [[C]], align 8, !llvm.access.group !33
28084 // CHECK13-NEXT:    [[TMP66:%.*]] = load i32, i32* [[I95]], align 4, !llvm.access.group !33
28085 // CHECK13-NEXT:    [[IDXPROM103:%.*]] = sext i32 [[TMP66]] to i64
28086 // CHECK13-NEXT:    [[ARRAYIDX104:%.*]] = getelementptr inbounds i32, i32* [[TMP65]], i64 [[IDXPROM103]]
28087 // CHECK13-NEXT:    [[TMP67:%.*]] = load i32, i32* [[ARRAYIDX104]], align 4, !llvm.access.group !33
28088 // CHECK13-NEXT:    [[ADD105:%.*]] = add nsw i32 [[TMP64]], [[TMP67]]
28089 // CHECK13-NEXT:    [[TMP68:%.*]] = load i32*, i32** [[A]], align 8, !llvm.access.group !33
28090 // CHECK13-NEXT:    [[TMP69:%.*]] = load i32, i32* [[I95]], align 4, !llvm.access.group !33
28091 // CHECK13-NEXT:    [[IDXPROM106:%.*]] = sext i32 [[TMP69]] to i64
28092 // CHECK13-NEXT:    [[ARRAYIDX107:%.*]] = getelementptr inbounds i32, i32* [[TMP68]], i64 [[IDXPROM106]]
28093 // CHECK13-NEXT:    store i32 [[ADD105]], i32* [[ARRAYIDX107]], align 4, !llvm.access.group !33
28094 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE108:%.*]]
28095 // CHECK13:       omp.body.continue108:
28096 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC109:%.*]]
28097 // CHECK13:       omp.inner.for.inc109:
28098 // CHECK13-NEXT:    [[TMP70:%.*]] = load i32, i32* [[DOTOMP_IV94]], align 4, !llvm.access.group !33
28099 // CHECK13-NEXT:    [[ADD110:%.*]] = add nsw i32 [[TMP70]], 1
28100 // CHECK13-NEXT:    store i32 [[ADD110]], i32* [[DOTOMP_IV94]], align 4, !llvm.access.group !33
28101 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND96]], !llvm.loop [[LOOP34:![0-9]+]]
28102 // CHECK13:       omp.inner.for.end111:
28103 // CHECK13-NEXT:    [[TMP71:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_84]], align 4
28104 // CHECK13-NEXT:    [[SUB112:%.*]] = sub nsw i32 [[TMP71]], 0
28105 // CHECK13-NEXT:    [[DIV113:%.*]] = sdiv i32 [[SUB112]], 1
28106 // CHECK13-NEXT:    [[MUL114:%.*]] = mul nsw i32 [[DIV113]], 1
28107 // CHECK13-NEXT:    [[ADD115:%.*]] = add nsw i32 0, [[MUL114]]
28108 // CHECK13-NEXT:    store i32 [[ADD115]], i32* [[I95]], align 4
28109 // CHECK13-NEXT:    br label [[SIMD_IF_END116]]
28110 // CHECK13:       simd.if.end116:
28111 // CHECK13-NEXT:    [[TMP72:%.*]] = load i32, i32* [[CH]], align 4
28112 // CHECK13-NEXT:    store i32 [[TMP72]], i32* [[DOTCAPTURE_EXPR_117]], align 4
28113 // CHECK13-NEXT:    [[TMP73:%.*]] = load i32, i32* [[N]], align 4
28114 // CHECK13-NEXT:    store i32 [[TMP73]], i32* [[DOTCAPTURE_EXPR_119]], align 4
28115 // CHECK13-NEXT:    [[TMP74:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_119]], align 4
28116 // CHECK13-NEXT:    [[SUB121:%.*]] = sub nsw i32 [[TMP74]], 0
28117 // CHECK13-NEXT:    [[DIV122:%.*]] = sdiv i32 [[SUB121]], 1
28118 // CHECK13-NEXT:    [[SUB123:%.*]] = sub nsw i32 [[DIV122]], 1
28119 // CHECK13-NEXT:    store i32 [[SUB123]], i32* [[DOTCAPTURE_EXPR_120]], align 4
28120 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB124]], align 4
28121 // CHECK13-NEXT:    [[TMP75:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_120]], align 4
28122 // CHECK13-NEXT:    store i32 [[TMP75]], i32* [[DOTOMP_UB125]], align 4
28123 // CHECK13-NEXT:    store i32 0, i32* [[I126]], align 4
28124 // CHECK13-NEXT:    [[TMP76:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_119]], align 4
28125 // CHECK13-NEXT:    [[CMP127:%.*]] = icmp slt i32 0, [[TMP76]]
28126 // CHECK13-NEXT:    br i1 [[CMP127]], label [[SIMD_IF_THEN128:%.*]], label [[SIMD_IF_END151:%.*]]
28127 // CHECK13:       simd.if.then128:
28128 // CHECK13-NEXT:    [[TMP77:%.*]] = load i32, i32* [[DOTOMP_LB124]], align 4
28129 // CHECK13-NEXT:    store i32 [[TMP77]], i32* [[DOTOMP_IV129]], align 4
28130 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND131:%.*]]
28131 // CHECK13:       omp.inner.for.cond131:
28132 // CHECK13-NEXT:    [[TMP78:%.*]] = load i32, i32* [[DOTOMP_IV129]], align 4, !llvm.access.group !36
28133 // CHECK13-NEXT:    [[TMP79:%.*]] = load i32, i32* [[DOTOMP_UB125]], align 4, !llvm.access.group !36
28134 // CHECK13-NEXT:    [[CMP132:%.*]] = icmp sle i32 [[TMP78]], [[TMP79]]
28135 // CHECK13-NEXT:    br i1 [[CMP132]], label [[OMP_INNER_FOR_BODY133:%.*]], label [[OMP_INNER_FOR_END146:%.*]]
28136 // CHECK13:       omp.inner.for.body133:
28137 // CHECK13-NEXT:    [[TMP80:%.*]] = load i32, i32* [[DOTOMP_IV129]], align 4, !llvm.access.group !36
28138 // CHECK13-NEXT:    [[MUL134:%.*]] = mul nsw i32 [[TMP80]], 1
28139 // CHECK13-NEXT:    [[ADD135:%.*]] = add nsw i32 0, [[MUL134]]
28140 // CHECK13-NEXT:    store i32 [[ADD135]], i32* [[I130]], align 4, !llvm.access.group !36
28141 // CHECK13-NEXT:    [[TMP81:%.*]] = load i32*, i32** [[B]], align 8, !llvm.access.group !36
28142 // CHECK13-NEXT:    [[TMP82:%.*]] = load i32, i32* [[I130]], align 4, !llvm.access.group !36
28143 // CHECK13-NEXT:    [[IDXPROM136:%.*]] = sext i32 [[TMP82]] to i64
28144 // CHECK13-NEXT:    [[ARRAYIDX137:%.*]] = getelementptr inbounds i32, i32* [[TMP81]], i64 [[IDXPROM136]]
28145 // CHECK13-NEXT:    [[TMP83:%.*]] = load i32, i32* [[ARRAYIDX137]], align 4, !llvm.access.group !36
28146 // CHECK13-NEXT:    [[TMP84:%.*]] = load i32*, i32** [[C]], align 8, !llvm.access.group !36
28147 // CHECK13-NEXT:    [[TMP85:%.*]] = load i32, i32* [[I130]], align 4, !llvm.access.group !36
28148 // CHECK13-NEXT:    [[IDXPROM138:%.*]] = sext i32 [[TMP85]] to i64
28149 // CHECK13-NEXT:    [[ARRAYIDX139:%.*]] = getelementptr inbounds i32, i32* [[TMP84]], i64 [[IDXPROM138]]
28150 // CHECK13-NEXT:    [[TMP86:%.*]] = load i32, i32* [[ARRAYIDX139]], align 4, !llvm.access.group !36
28151 // CHECK13-NEXT:    [[ADD140:%.*]] = add nsw i32 [[TMP83]], [[TMP86]]
28152 // CHECK13-NEXT:    [[TMP87:%.*]] = load i32*, i32** [[A]], align 8, !llvm.access.group !36
28153 // CHECK13-NEXT:    [[TMP88:%.*]] = load i32, i32* [[I130]], align 4, !llvm.access.group !36
28154 // CHECK13-NEXT:    [[IDXPROM141:%.*]] = sext i32 [[TMP88]] to i64
28155 // CHECK13-NEXT:    [[ARRAYIDX142:%.*]] = getelementptr inbounds i32, i32* [[TMP87]], i64 [[IDXPROM141]]
28156 // CHECK13-NEXT:    store i32 [[ADD140]], i32* [[ARRAYIDX142]], align 4, !llvm.access.group !36
28157 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE143:%.*]]
28158 // CHECK13:       omp.body.continue143:
28159 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC144:%.*]]
28160 // CHECK13:       omp.inner.for.inc144:
28161 // CHECK13-NEXT:    [[TMP89:%.*]] = load i32, i32* [[DOTOMP_IV129]], align 4, !llvm.access.group !36
28162 // CHECK13-NEXT:    [[ADD145:%.*]] = add nsw i32 [[TMP89]], 1
28163 // CHECK13-NEXT:    store i32 [[ADD145]], i32* [[DOTOMP_IV129]], align 4, !llvm.access.group !36
28164 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND131]], !llvm.loop [[LOOP37:![0-9]+]]
28165 // CHECK13:       omp.inner.for.end146:
28166 // CHECK13-NEXT:    [[TMP90:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_119]], align 4
28167 // CHECK13-NEXT:    [[SUB147:%.*]] = sub nsw i32 [[TMP90]], 0
28168 // CHECK13-NEXT:    [[DIV148:%.*]] = sdiv i32 [[SUB147]], 1
28169 // CHECK13-NEXT:    [[MUL149:%.*]] = mul nsw i32 [[DIV148]], 1
28170 // CHECK13-NEXT:    [[ADD150:%.*]] = add nsw i32 0, [[MUL149]]
28171 // CHECK13-NEXT:    store i32 [[ADD150]], i32* [[I130]], align 4
28172 // CHECK13-NEXT:    br label [[SIMD_IF_END151]]
28173 // CHECK13:       simd.if.end151:
28174 // CHECK13-NEXT:    [[TMP91:%.*]] = load i32, i32* [[N]], align 4
28175 // CHECK13-NEXT:    store i32 [[TMP91]], i32* [[DOTCAPTURE_EXPR_153]], align 4
28176 // CHECK13-NEXT:    [[TMP92:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_153]], align 4
28177 // CHECK13-NEXT:    [[SUB155:%.*]] = sub nsw i32 [[TMP92]], 0
28178 // CHECK13-NEXT:    [[DIV156:%.*]] = sdiv i32 [[SUB155]], 1
28179 // CHECK13-NEXT:    [[SUB157:%.*]] = sub nsw i32 [[DIV156]], 1
28180 // CHECK13-NEXT:    store i32 [[SUB157]], i32* [[DOTCAPTURE_EXPR_154]], align 4
28181 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB158]], align 4
28182 // CHECK13-NEXT:    [[TMP93:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_154]], align 4
28183 // CHECK13-NEXT:    store i32 [[TMP93]], i32* [[DOTOMP_UB159]], align 4
28184 // CHECK13-NEXT:    store i32 0, i32* [[I160]], align 4
28185 // CHECK13-NEXT:    [[TMP94:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_153]], align 4
28186 // CHECK13-NEXT:    [[CMP161:%.*]] = icmp slt i32 0, [[TMP94]]
28187 // CHECK13-NEXT:    br i1 [[CMP161]], label [[SIMD_IF_THEN162:%.*]], label [[SIMD_IF_END185:%.*]]
28188 // CHECK13:       simd.if.then162:
28189 // CHECK13-NEXT:    [[TMP95:%.*]] = load i32, i32* [[DOTOMP_LB158]], align 4
28190 // CHECK13-NEXT:    store i32 [[TMP95]], i32* [[DOTOMP_IV163]], align 4
28191 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND165:%.*]]
28192 // CHECK13:       omp.inner.for.cond165:
28193 // CHECK13-NEXT:    [[TMP96:%.*]] = load i32, i32* [[DOTOMP_IV163]], align 4, !llvm.access.group !39
28194 // CHECK13-NEXT:    [[TMP97:%.*]] = load i32, i32* [[DOTOMP_UB159]], align 4, !llvm.access.group !39
28195 // CHECK13-NEXT:    [[CMP166:%.*]] = icmp sle i32 [[TMP96]], [[TMP97]]
28196 // CHECK13-NEXT:    br i1 [[CMP166]], label [[OMP_INNER_FOR_BODY167:%.*]], label [[OMP_INNER_FOR_END180:%.*]]
28197 // CHECK13:       omp.inner.for.body167:
28198 // CHECK13-NEXT:    [[TMP98:%.*]] = load i32, i32* [[DOTOMP_IV163]], align 4, !llvm.access.group !39
28199 // CHECK13-NEXT:    [[MUL168:%.*]] = mul nsw i32 [[TMP98]], 1
28200 // CHECK13-NEXT:    [[ADD169:%.*]] = add nsw i32 0, [[MUL168]]
28201 // CHECK13-NEXT:    store i32 [[ADD169]], i32* [[I164]], align 4, !llvm.access.group !39
28202 // CHECK13-NEXT:    [[TMP99:%.*]] = load i32*, i32** [[B]], align 8, !llvm.access.group !39
28203 // CHECK13-NEXT:    [[TMP100:%.*]] = load i32, i32* [[I164]], align 4, !llvm.access.group !39
28204 // CHECK13-NEXT:    [[IDXPROM170:%.*]] = sext i32 [[TMP100]] to i64
28205 // CHECK13-NEXT:    [[ARRAYIDX171:%.*]] = getelementptr inbounds i32, i32* [[TMP99]], i64 [[IDXPROM170]]
28206 // CHECK13-NEXT:    [[TMP101:%.*]] = load i32, i32* [[ARRAYIDX171]], align 4, !llvm.access.group !39
28207 // CHECK13-NEXT:    [[TMP102:%.*]] = load i32*, i32** [[C]], align 8, !llvm.access.group !39
28208 // CHECK13-NEXT:    [[TMP103:%.*]] = load i32, i32* [[I164]], align 4, !llvm.access.group !39
28209 // CHECK13-NEXT:    [[IDXPROM172:%.*]] = sext i32 [[TMP103]] to i64
28210 // CHECK13-NEXT:    [[ARRAYIDX173:%.*]] = getelementptr inbounds i32, i32* [[TMP102]], i64 [[IDXPROM172]]
28211 // CHECK13-NEXT:    [[TMP104:%.*]] = load i32, i32* [[ARRAYIDX173]], align 4, !llvm.access.group !39
28212 // CHECK13-NEXT:    [[ADD174:%.*]] = add nsw i32 [[TMP101]], [[TMP104]]
28213 // CHECK13-NEXT:    [[TMP105:%.*]] = load i32*, i32** [[A]], align 8, !llvm.access.group !39
28214 // CHECK13-NEXT:    [[TMP106:%.*]] = load i32, i32* [[I164]], align 4, !llvm.access.group !39
28215 // CHECK13-NEXT:    [[IDXPROM175:%.*]] = sext i32 [[TMP106]] to i64
28216 // CHECK13-NEXT:    [[ARRAYIDX176:%.*]] = getelementptr inbounds i32, i32* [[TMP105]], i64 [[IDXPROM175]]
28217 // CHECK13-NEXT:    store i32 [[ADD174]], i32* [[ARRAYIDX176]], align 4, !llvm.access.group !39
28218 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE177:%.*]]
28219 // CHECK13:       omp.body.continue177:
28220 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC178:%.*]]
28221 // CHECK13:       omp.inner.for.inc178:
28222 // CHECK13-NEXT:    [[TMP107:%.*]] = load i32, i32* [[DOTOMP_IV163]], align 4, !llvm.access.group !39
28223 // CHECK13-NEXT:    [[ADD179:%.*]] = add nsw i32 [[TMP107]], 1
28224 // CHECK13-NEXT:    store i32 [[ADD179]], i32* [[DOTOMP_IV163]], align 4, !llvm.access.group !39
28225 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND165]], !llvm.loop [[LOOP40:![0-9]+]]
28226 // CHECK13:       omp.inner.for.end180:
28227 // CHECK13-NEXT:    [[TMP108:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_153]], align 4
28228 // CHECK13-NEXT:    [[SUB181:%.*]] = sub nsw i32 [[TMP108]], 0
28229 // CHECK13-NEXT:    [[DIV182:%.*]] = sdiv i32 [[SUB181]], 1
28230 // CHECK13-NEXT:    [[MUL183:%.*]] = mul nsw i32 [[DIV182]], 1
28231 // CHECK13-NEXT:    [[ADD184:%.*]] = add nsw i32 0, [[MUL183]]
28232 // CHECK13-NEXT:    store i32 [[ADD184]], i32* [[I164]], align 4
28233 // CHECK13-NEXT:    br label [[SIMD_IF_END185]]
28234 // CHECK13:       simd.if.end185:
28235 // CHECK13-NEXT:    [[TMP109:%.*]] = load i32, i32* [[CH]], align 4
28236 // CHECK13-NEXT:    store i32 [[TMP109]], i32* [[DOTCAPTURE_EXPR_186]], align 4
28237 // CHECK13-NEXT:    [[TMP110:%.*]] = load i32, i32* [[N]], align 4
28238 // CHECK13-NEXT:    store i32 [[TMP110]], i32* [[DOTCAPTURE_EXPR_188]], align 4
28239 // CHECK13-NEXT:    [[TMP111:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_188]], align 4
28240 // CHECK13-NEXT:    [[SUB190:%.*]] = sub nsw i32 [[TMP111]], 0
28241 // CHECK13-NEXT:    [[DIV191:%.*]] = sdiv i32 [[SUB190]], 1
28242 // CHECK13-NEXT:    [[SUB192:%.*]] = sub nsw i32 [[DIV191]], 1
28243 // CHECK13-NEXT:    store i32 [[SUB192]], i32* [[DOTCAPTURE_EXPR_189]], align 4
28244 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB193]], align 4
28245 // CHECK13-NEXT:    [[TMP112:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_189]], align 4
28246 // CHECK13-NEXT:    store i32 [[TMP112]], i32* [[DOTOMP_UB194]], align 4
28247 // CHECK13-NEXT:    store i32 0, i32* [[I195]], align 4
28248 // CHECK13-NEXT:    [[TMP113:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_188]], align 4
28249 // CHECK13-NEXT:    [[CMP196:%.*]] = icmp slt i32 0, [[TMP113]]
28250 // CHECK13-NEXT:    br i1 [[CMP196]], label [[SIMD_IF_THEN197:%.*]], label [[SIMD_IF_END220:%.*]]
28251 // CHECK13:       simd.if.then197:
28252 // CHECK13-NEXT:    [[TMP114:%.*]] = load i32, i32* [[DOTOMP_LB193]], align 4
28253 // CHECK13-NEXT:    store i32 [[TMP114]], i32* [[DOTOMP_IV198]], align 4
28254 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND200:%.*]]
28255 // CHECK13:       omp.inner.for.cond200:
28256 // CHECK13-NEXT:    [[TMP115:%.*]] = load i32, i32* [[DOTOMP_IV198]], align 4, !llvm.access.group !42
28257 // CHECK13-NEXT:    [[TMP116:%.*]] = load i32, i32* [[DOTOMP_UB194]], align 4, !llvm.access.group !42
28258 // CHECK13-NEXT:    [[CMP201:%.*]] = icmp sle i32 [[TMP115]], [[TMP116]]
28259 // CHECK13-NEXT:    br i1 [[CMP201]], label [[OMP_INNER_FOR_BODY202:%.*]], label [[OMP_INNER_FOR_END215:%.*]]
28260 // CHECK13:       omp.inner.for.body202:
28261 // CHECK13-NEXT:    [[TMP117:%.*]] = load i32, i32* [[DOTOMP_IV198]], align 4, !llvm.access.group !42
28262 // CHECK13-NEXT:    [[MUL203:%.*]] = mul nsw i32 [[TMP117]], 1
28263 // CHECK13-NEXT:    [[ADD204:%.*]] = add nsw i32 0, [[MUL203]]
28264 // CHECK13-NEXT:    store i32 [[ADD204]], i32* [[I199]], align 4, !llvm.access.group !42
28265 // CHECK13-NEXT:    [[TMP118:%.*]] = load i32*, i32** [[B]], align 8, !llvm.access.group !42
28266 // CHECK13-NEXT:    [[TMP119:%.*]] = load i32, i32* [[I199]], align 4, !llvm.access.group !42
28267 // CHECK13-NEXT:    [[IDXPROM205:%.*]] = sext i32 [[TMP119]] to i64
28268 // CHECK13-NEXT:    [[ARRAYIDX206:%.*]] = getelementptr inbounds i32, i32* [[TMP118]], i64 [[IDXPROM205]]
28269 // CHECK13-NEXT:    [[TMP120:%.*]] = load i32, i32* [[ARRAYIDX206]], align 4, !llvm.access.group !42
28270 // CHECK13-NEXT:    [[TMP121:%.*]] = load i32*, i32** [[C]], align 8, !llvm.access.group !42
28271 // CHECK13-NEXT:    [[TMP122:%.*]] = load i32, i32* [[I199]], align 4, !llvm.access.group !42
28272 // CHECK13-NEXT:    [[IDXPROM207:%.*]] = sext i32 [[TMP122]] to i64
28273 // CHECK13-NEXT:    [[ARRAYIDX208:%.*]] = getelementptr inbounds i32, i32* [[TMP121]], i64 [[IDXPROM207]]
28274 // CHECK13-NEXT:    [[TMP123:%.*]] = load i32, i32* [[ARRAYIDX208]], align 4, !llvm.access.group !42
28275 // CHECK13-NEXT:    [[ADD209:%.*]] = add nsw i32 [[TMP120]], [[TMP123]]
28276 // CHECK13-NEXT:    [[TMP124:%.*]] = load i32*, i32** [[A]], align 8, !llvm.access.group !42
28277 // CHECK13-NEXT:    [[TMP125:%.*]] = load i32, i32* [[I199]], align 4, !llvm.access.group !42
28278 // CHECK13-NEXT:    [[IDXPROM210:%.*]] = sext i32 [[TMP125]] to i64
28279 // CHECK13-NEXT:    [[ARRAYIDX211:%.*]] = getelementptr inbounds i32, i32* [[TMP124]], i64 [[IDXPROM210]]
28280 // CHECK13-NEXT:    store i32 [[ADD209]], i32* [[ARRAYIDX211]], align 4, !llvm.access.group !42
28281 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE212:%.*]]
28282 // CHECK13:       omp.body.continue212:
28283 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC213:%.*]]
28284 // CHECK13:       omp.inner.for.inc213:
28285 // CHECK13-NEXT:    [[TMP126:%.*]] = load i32, i32* [[DOTOMP_IV198]], align 4, !llvm.access.group !42
28286 // CHECK13-NEXT:    [[ADD214:%.*]] = add nsw i32 [[TMP126]], 1
28287 // CHECK13-NEXT:    store i32 [[ADD214]], i32* [[DOTOMP_IV198]], align 4, !llvm.access.group !42
28288 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND200]], !llvm.loop [[LOOP43:![0-9]+]]
28289 // CHECK13:       omp.inner.for.end215:
28290 // CHECK13-NEXT:    [[TMP127:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_188]], align 4
28291 // CHECK13-NEXT:    [[SUB216:%.*]] = sub nsw i32 [[TMP127]], 0
28292 // CHECK13-NEXT:    [[DIV217:%.*]] = sdiv i32 [[SUB216]], 1
28293 // CHECK13-NEXT:    [[MUL218:%.*]] = mul nsw i32 [[DIV217]], 1
28294 // CHECK13-NEXT:    [[ADD219:%.*]] = add nsw i32 0, [[MUL218]]
28295 // CHECK13-NEXT:    store i32 [[ADD219]], i32* [[I199]], align 4
28296 // CHECK13-NEXT:    br label [[SIMD_IF_END220]]
28297 // CHECK13:       simd.if.end220:
28298 // CHECK13-NEXT:    ret i32 0
28299 //
28300 //
28301 // CHECK14-LABEL: define {{[^@]+}}@main
28302 // CHECK14-SAME: () #[[ATTR0:[0-9]+]] {
28303 // CHECK14-NEXT:  entry:
28304 // CHECK14-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
28305 // CHECK14-NEXT:    [[A:%.*]] = alloca double*, align 8
28306 // CHECK14-NEXT:    [[B:%.*]] = alloca double*, align 8
28307 // CHECK14-NEXT:    [[C:%.*]] = alloca double*, align 8
28308 // CHECK14-NEXT:    [[N:%.*]] = alloca i32, align 4
28309 // CHECK14-NEXT:    [[CH:%.*]] = alloca i32, align 4
28310 // CHECK14-NEXT:    [[TMP:%.*]] = alloca i32, align 4
28311 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
28312 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
28313 // CHECK14-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
28314 // CHECK14-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
28315 // CHECK14-NEXT:    [[I:%.*]] = alloca i32, align 4
28316 // CHECK14-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
28317 // CHECK14-NEXT:    [[I3:%.*]] = alloca i32, align 4
28318 // CHECK14-NEXT:    [[_TMP15:%.*]] = alloca i32, align 4
28319 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_16:%.*]] = alloca i32, align 4
28320 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4
28321 // CHECK14-NEXT:    [[DOTOMP_LB21:%.*]] = alloca i32, align 4
28322 // CHECK14-NEXT:    [[DOTOMP_UB22:%.*]] = alloca i32, align 4
28323 // CHECK14-NEXT:    [[I23:%.*]] = alloca i32, align 4
28324 // CHECK14-NEXT:    [[DOTOMP_IV26:%.*]] = alloca i32, align 4
28325 // CHECK14-NEXT:    [[I27:%.*]] = alloca i32, align 4
28326 // CHECK14-NEXT:    [[_TMP49:%.*]] = alloca i32, align 4
28327 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_50:%.*]] = alloca i32, align 4
28328 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_51:%.*]] = alloca i32, align 4
28329 // CHECK14-NEXT:    [[DOTOMP_LB55:%.*]] = alloca i32, align 4
28330 // CHECK14-NEXT:    [[DOTOMP_UB56:%.*]] = alloca i32, align 4
28331 // CHECK14-NEXT:    [[I57:%.*]] = alloca i32, align 4
28332 // CHECK14-NEXT:    [[DOTOMP_IV60:%.*]] = alloca i32, align 4
28333 // CHECK14-NEXT:    [[I61:%.*]] = alloca i32, align 4
28334 // CHECK14-NEXT:    [[_TMP83:%.*]] = alloca i32, align 4
28335 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_84:%.*]] = alloca i32, align 4
28336 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_85:%.*]] = alloca i32, align 4
28337 // CHECK14-NEXT:    [[DOTOMP_LB89:%.*]] = alloca i32, align 4
28338 // CHECK14-NEXT:    [[DOTOMP_UB90:%.*]] = alloca i32, align 4
28339 // CHECK14-NEXT:    [[I91:%.*]] = alloca i32, align 4
28340 // CHECK14-NEXT:    [[DOTOMP_IV94:%.*]] = alloca i32, align 4
28341 // CHECK14-NEXT:    [[I95:%.*]] = alloca i32, align 4
28342 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_117:%.*]] = alloca i32, align 4
28343 // CHECK14-NEXT:    [[_TMP118:%.*]] = alloca i32, align 4
28344 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_119:%.*]] = alloca i32, align 4
28345 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_120:%.*]] = alloca i32, align 4
28346 // CHECK14-NEXT:    [[DOTOMP_LB124:%.*]] = alloca i32, align 4
28347 // CHECK14-NEXT:    [[DOTOMP_UB125:%.*]] = alloca i32, align 4
28348 // CHECK14-NEXT:    [[I126:%.*]] = alloca i32, align 4
28349 // CHECK14-NEXT:    [[DOTOMP_IV129:%.*]] = alloca i32, align 4
28350 // CHECK14-NEXT:    [[I130:%.*]] = alloca i32, align 4
28351 // CHECK14-NEXT:    [[_TMP152:%.*]] = alloca i32, align 4
28352 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_153:%.*]] = alloca i32, align 4
28353 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_154:%.*]] = alloca i32, align 4
28354 // CHECK14-NEXT:    [[DOTOMP_LB158:%.*]] = alloca i32, align 4
28355 // CHECK14-NEXT:    [[DOTOMP_UB159:%.*]] = alloca i32, align 4
28356 // CHECK14-NEXT:    [[I160:%.*]] = alloca i32, align 4
28357 // CHECK14-NEXT:    [[DOTOMP_IV163:%.*]] = alloca i32, align 4
28358 // CHECK14-NEXT:    [[I164:%.*]] = alloca i32, align 4
28359 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_186:%.*]] = alloca i32, align 4
28360 // CHECK14-NEXT:    [[_TMP187:%.*]] = alloca i32, align 4
28361 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_188:%.*]] = alloca i32, align 4
28362 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_189:%.*]] = alloca i32, align 4
28363 // CHECK14-NEXT:    [[DOTOMP_LB193:%.*]] = alloca i32, align 4
28364 // CHECK14-NEXT:    [[DOTOMP_UB194:%.*]] = alloca i32, align 4
28365 // CHECK14-NEXT:    [[I195:%.*]] = alloca i32, align 4
28366 // CHECK14-NEXT:    [[DOTOMP_IV198:%.*]] = alloca i32, align 4
28367 // CHECK14-NEXT:    [[I199:%.*]] = alloca i32, align 4
28368 // CHECK14-NEXT:    store i32 0, i32* [[RETVAL]], align 4
28369 // CHECK14-NEXT:    store i32 10000, i32* [[N]], align 4
28370 // CHECK14-NEXT:    store i32 100, i32* [[CH]], align 4
28371 // CHECK14-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
28372 // CHECK14-NEXT:    store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
28373 // CHECK14-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
28374 // CHECK14-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP1]], 0
28375 // CHECK14-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
28376 // CHECK14-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
28377 // CHECK14-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
28378 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
28379 // CHECK14-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
28380 // CHECK14-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
28381 // CHECK14-NEXT:    store i32 0, i32* [[I]], align 4
28382 // CHECK14-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
28383 // CHECK14-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
28384 // CHECK14-NEXT:    br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
28385 // CHECK14:       simd.if.then:
28386 // CHECK14-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
28387 // CHECK14-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
28388 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
28389 // CHECK14:       omp.inner.for.cond:
28390 // CHECK14-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
28391 // CHECK14-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !2
28392 // CHECK14-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
28393 // CHECK14-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
28394 // CHECK14:       omp.inner.for.body:
28395 // CHECK14-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
28396 // CHECK14-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
28397 // CHECK14-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
28398 // CHECK14-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !2
28399 // CHECK14-NEXT:    [[TMP8:%.*]] = load double*, double** [[B]], align 8, !llvm.access.group !2
28400 // CHECK14-NEXT:    [[TMP9:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !2
28401 // CHECK14-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP9]] to i64
28402 // CHECK14-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP8]], i64 [[IDXPROM]]
28403 // CHECK14-NEXT:    [[TMP10:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !2
28404 // CHECK14-NEXT:    [[TMP11:%.*]] = load double*, double** [[C]], align 8, !llvm.access.group !2
28405 // CHECK14-NEXT:    [[TMP12:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !2
28406 // CHECK14-NEXT:    [[IDXPROM5:%.*]] = sext i32 [[TMP12]] to i64
28407 // CHECK14-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP11]], i64 [[IDXPROM5]]
28408 // CHECK14-NEXT:    [[TMP13:%.*]] = load double, double* [[ARRAYIDX6]], align 8, !llvm.access.group !2
28409 // CHECK14-NEXT:    [[ADD7:%.*]] = fadd double [[TMP10]], [[TMP13]]
28410 // CHECK14-NEXT:    [[TMP14:%.*]] = load double*, double** [[A]], align 8, !llvm.access.group !2
28411 // CHECK14-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !2
28412 // CHECK14-NEXT:    [[IDXPROM8:%.*]] = sext i32 [[TMP15]] to i64
28413 // CHECK14-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds double, double* [[TMP14]], i64 [[IDXPROM8]]
28414 // CHECK14-NEXT:    store double [[ADD7]], double* [[ARRAYIDX9]], align 8, !llvm.access.group !2
28415 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
28416 // CHECK14:       omp.body.continue:
28417 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
28418 // CHECK14:       omp.inner.for.inc:
28419 // CHECK14-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
28420 // CHECK14-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP16]], 1
28421 // CHECK14-NEXT:    store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
28422 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
28423 // CHECK14:       omp.inner.for.end:
28424 // CHECK14-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
28425 // CHECK14-NEXT:    [[SUB11:%.*]] = sub nsw i32 [[TMP17]], 0
28426 // CHECK14-NEXT:    [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1
28427 // CHECK14-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[DIV12]], 1
28428 // CHECK14-NEXT:    [[ADD14:%.*]] = add nsw i32 0, [[MUL13]]
28429 // CHECK14-NEXT:    store i32 [[ADD14]], i32* [[I3]], align 4
28430 // CHECK14-NEXT:    br label [[SIMD_IF_END]]
28431 // CHECK14:       simd.if.end:
28432 // CHECK14-NEXT:    [[TMP18:%.*]] = load i32, i32* [[N]], align 4
28433 // CHECK14-NEXT:    store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR_16]], align 4
28434 // CHECK14-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4
28435 // CHECK14-NEXT:    [[SUB18:%.*]] = sub nsw i32 [[TMP19]], 0
28436 // CHECK14-NEXT:    [[DIV19:%.*]] = sdiv i32 [[SUB18]], 1
28437 // CHECK14-NEXT:    [[SUB20:%.*]] = sub nsw i32 [[DIV19]], 1
28438 // CHECK14-NEXT:    store i32 [[SUB20]], i32* [[DOTCAPTURE_EXPR_17]], align 4
28439 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB21]], align 4
28440 // CHECK14-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4
28441 // CHECK14-NEXT:    store i32 [[TMP20]], i32* [[DOTOMP_UB22]], align 4
28442 // CHECK14-NEXT:    store i32 0, i32* [[I23]], align 4
28443 // CHECK14-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4
28444 // CHECK14-NEXT:    [[CMP24:%.*]] = icmp slt i32 0, [[TMP21]]
28445 // CHECK14-NEXT:    br i1 [[CMP24]], label [[SIMD_IF_THEN25:%.*]], label [[SIMD_IF_END48:%.*]]
28446 // CHECK14:       simd.if.then25:
28447 // CHECK14-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_LB21]], align 4
28448 // CHECK14-NEXT:    store i32 [[TMP22]], i32* [[DOTOMP_IV26]], align 4
28449 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND28:%.*]]
28450 // CHECK14:       omp.inner.for.cond28:
28451 // CHECK14-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV26]], align 4, !llvm.access.group !6
28452 // CHECK14-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_UB22]], align 4, !llvm.access.group !6
28453 // CHECK14-NEXT:    [[CMP29:%.*]] = icmp sle i32 [[TMP23]], [[TMP24]]
28454 // CHECK14-NEXT:    br i1 [[CMP29]], label [[OMP_INNER_FOR_BODY30:%.*]], label [[OMP_INNER_FOR_END43:%.*]]
28455 // CHECK14:       omp.inner.for.body30:
28456 // CHECK14-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV26]], align 4, !llvm.access.group !6
28457 // CHECK14-NEXT:    [[MUL31:%.*]] = mul nsw i32 [[TMP25]], 1
28458 // CHECK14-NEXT:    [[ADD32:%.*]] = add nsw i32 0, [[MUL31]]
28459 // CHECK14-NEXT:    store i32 [[ADD32]], i32* [[I27]], align 4, !llvm.access.group !6
28460 // CHECK14-NEXT:    [[TMP26:%.*]] = load double*, double** [[B]], align 8, !llvm.access.group !6
28461 // CHECK14-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I27]], align 4, !llvm.access.group !6
28462 // CHECK14-NEXT:    [[IDXPROM33:%.*]] = sext i32 [[TMP27]] to i64
28463 // CHECK14-NEXT:    [[ARRAYIDX34:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM33]]
28464 // CHECK14-NEXT:    [[TMP28:%.*]] = load double, double* [[ARRAYIDX34]], align 8, !llvm.access.group !6
28465 // CHECK14-NEXT:    [[TMP29:%.*]] = load double*, double** [[C]], align 8, !llvm.access.group !6
28466 // CHECK14-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I27]], align 4, !llvm.access.group !6
28467 // CHECK14-NEXT:    [[IDXPROM35:%.*]] = sext i32 [[TMP30]] to i64
28468 // CHECK14-NEXT:    [[ARRAYIDX36:%.*]] = getelementptr inbounds double, double* [[TMP29]], i64 [[IDXPROM35]]
28469 // CHECK14-NEXT:    [[TMP31:%.*]] = load double, double* [[ARRAYIDX36]], align 8, !llvm.access.group !6
28470 // CHECK14-NEXT:    [[ADD37:%.*]] = fadd double [[TMP28]], [[TMP31]]
28471 // CHECK14-NEXT:    [[TMP32:%.*]] = load double*, double** [[A]], align 8, !llvm.access.group !6
28472 // CHECK14-NEXT:    [[TMP33:%.*]] = load i32, i32* [[I27]], align 4, !llvm.access.group !6
28473 // CHECK14-NEXT:    [[IDXPROM38:%.*]] = sext i32 [[TMP33]] to i64
28474 // CHECK14-NEXT:    [[ARRAYIDX39:%.*]] = getelementptr inbounds double, double* [[TMP32]], i64 [[IDXPROM38]]
28475 // CHECK14-NEXT:    store double [[ADD37]], double* [[ARRAYIDX39]], align 8, !llvm.access.group !6
28476 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE40:%.*]]
28477 // CHECK14:       omp.body.continue40:
28478 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC41:%.*]]
28479 // CHECK14:       omp.inner.for.inc41:
28480 // CHECK14-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV26]], align 4, !llvm.access.group !6
28481 // CHECK14-NEXT:    [[ADD42:%.*]] = add nsw i32 [[TMP34]], 1
28482 // CHECK14-NEXT:    store i32 [[ADD42]], i32* [[DOTOMP_IV26]], align 4, !llvm.access.group !6
28483 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND28]], !llvm.loop [[LOOP7:![0-9]+]]
28484 // CHECK14:       omp.inner.for.end43:
28485 // CHECK14-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4
28486 // CHECK14-NEXT:    [[SUB44:%.*]] = sub nsw i32 [[TMP35]], 0
28487 // CHECK14-NEXT:    [[DIV45:%.*]] = sdiv i32 [[SUB44]], 1
28488 // CHECK14-NEXT:    [[MUL46:%.*]] = mul nsw i32 [[DIV45]], 1
28489 // CHECK14-NEXT:    [[ADD47:%.*]] = add nsw i32 0, [[MUL46]]
28490 // CHECK14-NEXT:    store i32 [[ADD47]], i32* [[I27]], align 4
28491 // CHECK14-NEXT:    br label [[SIMD_IF_END48]]
28492 // CHECK14:       simd.if.end48:
28493 // CHECK14-NEXT:    [[TMP36:%.*]] = load i32, i32* [[N]], align 4
28494 // CHECK14-NEXT:    store i32 [[TMP36]], i32* [[DOTCAPTURE_EXPR_50]], align 4
28495 // CHECK14-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4
28496 // CHECK14-NEXT:    [[SUB52:%.*]] = sub nsw i32 [[TMP37]], 0
28497 // CHECK14-NEXT:    [[DIV53:%.*]] = sdiv i32 [[SUB52]], 1
28498 // CHECK14-NEXT:    [[SUB54:%.*]] = sub nsw i32 [[DIV53]], 1
28499 // CHECK14-NEXT:    store i32 [[SUB54]], i32* [[DOTCAPTURE_EXPR_51]], align 4
28500 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB55]], align 4
28501 // CHECK14-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_51]], align 4
28502 // CHECK14-NEXT:    store i32 [[TMP38]], i32* [[DOTOMP_UB56]], align 4
28503 // CHECK14-NEXT:    store i32 0, i32* [[I57]], align 4
28504 // CHECK14-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4
28505 // CHECK14-NEXT:    [[CMP58:%.*]] = icmp slt i32 0, [[TMP39]]
28506 // CHECK14-NEXT:    br i1 [[CMP58]], label [[SIMD_IF_THEN59:%.*]], label [[SIMD_IF_END82:%.*]]
28507 // CHECK14:       simd.if.then59:
28508 // CHECK14-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_LB55]], align 4
28509 // CHECK14-NEXT:    store i32 [[TMP40]], i32* [[DOTOMP_IV60]], align 4
28510 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND62:%.*]]
28511 // CHECK14:       omp.inner.for.cond62:
28512 // CHECK14-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_IV60]], align 4, !llvm.access.group !9
28513 // CHECK14-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_UB56]], align 4, !llvm.access.group !9
28514 // CHECK14-NEXT:    [[CMP63:%.*]] = icmp sle i32 [[TMP41]], [[TMP42]]
28515 // CHECK14-NEXT:    br i1 [[CMP63]], label [[OMP_INNER_FOR_BODY64:%.*]], label [[OMP_INNER_FOR_END77:%.*]]
28516 // CHECK14:       omp.inner.for.body64:
28517 // CHECK14-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV60]], align 4, !llvm.access.group !9
28518 // CHECK14-NEXT:    [[MUL65:%.*]] = mul nsw i32 [[TMP43]], 1
28519 // CHECK14-NEXT:    [[ADD66:%.*]] = add nsw i32 0, [[MUL65]]
28520 // CHECK14-NEXT:    store i32 [[ADD66]], i32* [[I61]], align 4, !llvm.access.group !9
28521 // CHECK14-NEXT:    [[TMP44:%.*]] = load double*, double** [[B]], align 8, !llvm.access.group !9
28522 // CHECK14-NEXT:    [[TMP45:%.*]] = load i32, i32* [[I61]], align 4, !llvm.access.group !9
28523 // CHECK14-NEXT:    [[IDXPROM67:%.*]] = sext i32 [[TMP45]] to i64
28524 // CHECK14-NEXT:    [[ARRAYIDX68:%.*]] = getelementptr inbounds double, double* [[TMP44]], i64 [[IDXPROM67]]
28525 // CHECK14-NEXT:    [[TMP46:%.*]] = load double, double* [[ARRAYIDX68]], align 8, !llvm.access.group !9
28526 // CHECK14-NEXT:    [[TMP47:%.*]] = load double*, double** [[C]], align 8, !llvm.access.group !9
28527 // CHECK14-NEXT:    [[TMP48:%.*]] = load i32, i32* [[I61]], align 4, !llvm.access.group !9
28528 // CHECK14-NEXT:    [[IDXPROM69:%.*]] = sext i32 [[TMP48]] to i64
28529 // CHECK14-NEXT:    [[ARRAYIDX70:%.*]] = getelementptr inbounds double, double* [[TMP47]], i64 [[IDXPROM69]]
28530 // CHECK14-NEXT:    [[TMP49:%.*]] = load double, double* [[ARRAYIDX70]], align 8, !llvm.access.group !9
28531 // CHECK14-NEXT:    [[ADD71:%.*]] = fadd double [[TMP46]], [[TMP49]]
28532 // CHECK14-NEXT:    [[TMP50:%.*]] = load double*, double** [[A]], align 8, !llvm.access.group !9
28533 // CHECK14-NEXT:    [[TMP51:%.*]] = load i32, i32* [[I61]], align 4, !llvm.access.group !9
28534 // CHECK14-NEXT:    [[IDXPROM72:%.*]] = sext i32 [[TMP51]] to i64
28535 // CHECK14-NEXT:    [[ARRAYIDX73:%.*]] = getelementptr inbounds double, double* [[TMP50]], i64 [[IDXPROM72]]
28536 // CHECK14-NEXT:    store double [[ADD71]], double* [[ARRAYIDX73]], align 8, !llvm.access.group !9
28537 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE74:%.*]]
28538 // CHECK14:       omp.body.continue74:
28539 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC75:%.*]]
28540 // CHECK14:       omp.inner.for.inc75:
28541 // CHECK14-NEXT:    [[TMP52:%.*]] = load i32, i32* [[DOTOMP_IV60]], align 4, !llvm.access.group !9
28542 // CHECK14-NEXT:    [[ADD76:%.*]] = add nsw i32 [[TMP52]], 1
28543 // CHECK14-NEXT:    store i32 [[ADD76]], i32* [[DOTOMP_IV60]], align 4, !llvm.access.group !9
28544 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND62]], !llvm.loop [[LOOP10:![0-9]+]]
28545 // CHECK14:       omp.inner.for.end77:
28546 // CHECK14-NEXT:    [[TMP53:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4
28547 // CHECK14-NEXT:    [[SUB78:%.*]] = sub nsw i32 [[TMP53]], 0
28548 // CHECK14-NEXT:    [[DIV79:%.*]] = sdiv i32 [[SUB78]], 1
28549 // CHECK14-NEXT:    [[MUL80:%.*]] = mul nsw i32 [[DIV79]], 1
28550 // CHECK14-NEXT:    [[ADD81:%.*]] = add nsw i32 0, [[MUL80]]
28551 // CHECK14-NEXT:    store i32 [[ADD81]], i32* [[I61]], align 4
28552 // CHECK14-NEXT:    br label [[SIMD_IF_END82]]
28553 // CHECK14:       simd.if.end82:
28554 // CHECK14-NEXT:    [[TMP54:%.*]] = load i32, i32* [[N]], align 4
28555 // CHECK14-NEXT:    store i32 [[TMP54]], i32* [[DOTCAPTURE_EXPR_84]], align 4
28556 // CHECK14-NEXT:    [[TMP55:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_84]], align 4
28557 // CHECK14-NEXT:    [[SUB86:%.*]] = sub nsw i32 [[TMP55]], 0
28558 // CHECK14-NEXT:    [[DIV87:%.*]] = sdiv i32 [[SUB86]], 1
28559 // CHECK14-NEXT:    [[SUB88:%.*]] = sub nsw i32 [[DIV87]], 1
28560 // CHECK14-NEXT:    store i32 [[SUB88]], i32* [[DOTCAPTURE_EXPR_85]], align 4
28561 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB89]], align 4
28562 // CHECK14-NEXT:    [[TMP56:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_85]], align 4
28563 // CHECK14-NEXT:    store i32 [[TMP56]], i32* [[DOTOMP_UB90]], align 4
28564 // CHECK14-NEXT:    store i32 0, i32* [[I91]], align 4
28565 // CHECK14-NEXT:    [[TMP57:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_84]], align 4
28566 // CHECK14-NEXT:    [[CMP92:%.*]] = icmp slt i32 0, [[TMP57]]
28567 // CHECK14-NEXT:    br i1 [[CMP92]], label [[SIMD_IF_THEN93:%.*]], label [[SIMD_IF_END116:%.*]]
28568 // CHECK14:       simd.if.then93:
28569 // CHECK14-NEXT:    [[TMP58:%.*]] = load i32, i32* [[DOTOMP_LB89]], align 4
28570 // CHECK14-NEXT:    store i32 [[TMP58]], i32* [[DOTOMP_IV94]], align 4
28571 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND96:%.*]]
28572 // CHECK14:       omp.inner.for.cond96:
28573 // CHECK14-NEXT:    [[TMP59:%.*]] = load i32, i32* [[DOTOMP_IV94]], align 4, !llvm.access.group !12
28574 // CHECK14-NEXT:    [[TMP60:%.*]] = load i32, i32* [[DOTOMP_UB90]], align 4, !llvm.access.group !12
28575 // CHECK14-NEXT:    [[CMP97:%.*]] = icmp sle i32 [[TMP59]], [[TMP60]]
28576 // CHECK14-NEXT:    br i1 [[CMP97]], label [[OMP_INNER_FOR_BODY98:%.*]], label [[OMP_INNER_FOR_END111:%.*]]
28577 // CHECK14:       omp.inner.for.body98:
28578 // CHECK14-NEXT:    [[TMP61:%.*]] = load i32, i32* [[DOTOMP_IV94]], align 4, !llvm.access.group !12
28579 // CHECK14-NEXT:    [[MUL99:%.*]] = mul nsw i32 [[TMP61]], 1
28580 // CHECK14-NEXT:    [[ADD100:%.*]] = add nsw i32 0, [[MUL99]]
28581 // CHECK14-NEXT:    store i32 [[ADD100]], i32* [[I95]], align 4, !llvm.access.group !12
28582 // CHECK14-NEXT:    [[TMP62:%.*]] = load double*, double** [[B]], align 8, !llvm.access.group !12
28583 // CHECK14-NEXT:    [[TMP63:%.*]] = load i32, i32* [[I95]], align 4, !llvm.access.group !12
28584 // CHECK14-NEXT:    [[IDXPROM101:%.*]] = sext i32 [[TMP63]] to i64
28585 // CHECK14-NEXT:    [[ARRAYIDX102:%.*]] = getelementptr inbounds double, double* [[TMP62]], i64 [[IDXPROM101]]
28586 // CHECK14-NEXT:    [[TMP64:%.*]] = load double, double* [[ARRAYIDX102]], align 8, !llvm.access.group !12
28587 // CHECK14-NEXT:    [[TMP65:%.*]] = load double*, double** [[C]], align 8, !llvm.access.group !12
28588 // CHECK14-NEXT:    [[TMP66:%.*]] = load i32, i32* [[I95]], align 4, !llvm.access.group !12
28589 // CHECK14-NEXT:    [[IDXPROM103:%.*]] = sext i32 [[TMP66]] to i64
28590 // CHECK14-NEXT:    [[ARRAYIDX104:%.*]] = getelementptr inbounds double, double* [[TMP65]], i64 [[IDXPROM103]]
28591 // CHECK14-NEXT:    [[TMP67:%.*]] = load double, double* [[ARRAYIDX104]], align 8, !llvm.access.group !12
28592 // CHECK14-NEXT:    [[ADD105:%.*]] = fadd double [[TMP64]], [[TMP67]]
28593 // CHECK14-NEXT:    [[TMP68:%.*]] = load double*, double** [[A]], align 8, !llvm.access.group !12
28594 // CHECK14-NEXT:    [[TMP69:%.*]] = load i32, i32* [[I95]], align 4, !llvm.access.group !12
28595 // CHECK14-NEXT:    [[IDXPROM106:%.*]] = sext i32 [[TMP69]] to i64
28596 // CHECK14-NEXT:    [[ARRAYIDX107:%.*]] = getelementptr inbounds double, double* [[TMP68]], i64 [[IDXPROM106]]
28597 // CHECK14-NEXT:    store double [[ADD105]], double* [[ARRAYIDX107]], align 8, !llvm.access.group !12
28598 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE108:%.*]]
28599 // CHECK14:       omp.body.continue108:
28600 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC109:%.*]]
28601 // CHECK14:       omp.inner.for.inc109:
28602 // CHECK14-NEXT:    [[TMP70:%.*]] = load i32, i32* [[DOTOMP_IV94]], align 4, !llvm.access.group !12
28603 // CHECK14-NEXT:    [[ADD110:%.*]] = add nsw i32 [[TMP70]], 1
28604 // CHECK14-NEXT:    store i32 [[ADD110]], i32* [[DOTOMP_IV94]], align 4, !llvm.access.group !12
28605 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND96]], !llvm.loop [[LOOP13:![0-9]+]]
28606 // CHECK14:       omp.inner.for.end111:
28607 // CHECK14-NEXT:    [[TMP71:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_84]], align 4
28608 // CHECK14-NEXT:    [[SUB112:%.*]] = sub nsw i32 [[TMP71]], 0
28609 // CHECK14-NEXT:    [[DIV113:%.*]] = sdiv i32 [[SUB112]], 1
28610 // CHECK14-NEXT:    [[MUL114:%.*]] = mul nsw i32 [[DIV113]], 1
28611 // CHECK14-NEXT:    [[ADD115:%.*]] = add nsw i32 0, [[MUL114]]
28612 // CHECK14-NEXT:    store i32 [[ADD115]], i32* [[I95]], align 4
28613 // CHECK14-NEXT:    br label [[SIMD_IF_END116]]
28614 // CHECK14:       simd.if.end116:
28615 // CHECK14-NEXT:    [[TMP72:%.*]] = load i32, i32* [[CH]], align 4
28616 // CHECK14-NEXT:    store i32 [[TMP72]], i32* [[DOTCAPTURE_EXPR_117]], align 4
28617 // CHECK14-NEXT:    [[TMP73:%.*]] = load i32, i32* [[N]], align 4
28618 // CHECK14-NEXT:    store i32 [[TMP73]], i32* [[DOTCAPTURE_EXPR_119]], align 4
28619 // CHECK14-NEXT:    [[TMP74:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_119]], align 4
28620 // CHECK14-NEXT:    [[SUB121:%.*]] = sub nsw i32 [[TMP74]], 0
28621 // CHECK14-NEXT:    [[DIV122:%.*]] = sdiv i32 [[SUB121]], 1
28622 // CHECK14-NEXT:    [[SUB123:%.*]] = sub nsw i32 [[DIV122]], 1
28623 // CHECK14-NEXT:    store i32 [[SUB123]], i32* [[DOTCAPTURE_EXPR_120]], align 4
28624 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB124]], align 4
28625 // CHECK14-NEXT:    [[TMP75:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_120]], align 4
28626 // CHECK14-NEXT:    store i32 [[TMP75]], i32* [[DOTOMP_UB125]], align 4
28627 // CHECK14-NEXT:    store i32 0, i32* [[I126]], align 4
28628 // CHECK14-NEXT:    [[TMP76:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_119]], align 4
28629 // CHECK14-NEXT:    [[CMP127:%.*]] = icmp slt i32 0, [[TMP76]]
28630 // CHECK14-NEXT:    br i1 [[CMP127]], label [[SIMD_IF_THEN128:%.*]], label [[SIMD_IF_END151:%.*]]
28631 // CHECK14:       simd.if.then128:
28632 // CHECK14-NEXT:    [[TMP77:%.*]] = load i32, i32* [[DOTOMP_LB124]], align 4
28633 // CHECK14-NEXT:    store i32 [[TMP77]], i32* [[DOTOMP_IV129]], align 4
28634 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND131:%.*]]
28635 // CHECK14:       omp.inner.for.cond131:
28636 // CHECK14-NEXT:    [[TMP78:%.*]] = load i32, i32* [[DOTOMP_IV129]], align 4, !llvm.access.group !15
28637 // CHECK14-NEXT:    [[TMP79:%.*]] = load i32, i32* [[DOTOMP_UB125]], align 4, !llvm.access.group !15
28638 // CHECK14-NEXT:    [[CMP132:%.*]] = icmp sle i32 [[TMP78]], [[TMP79]]
28639 // CHECK14-NEXT:    br i1 [[CMP132]], label [[OMP_INNER_FOR_BODY133:%.*]], label [[OMP_INNER_FOR_END146:%.*]]
28640 // CHECK14:       omp.inner.for.body133:
28641 // CHECK14-NEXT:    [[TMP80:%.*]] = load i32, i32* [[DOTOMP_IV129]], align 4, !llvm.access.group !15
28642 // CHECK14-NEXT:    [[MUL134:%.*]] = mul nsw i32 [[TMP80]], 1
28643 // CHECK14-NEXT:    [[ADD135:%.*]] = add nsw i32 0, [[MUL134]]
28644 // CHECK14-NEXT:    store i32 [[ADD135]], i32* [[I130]], align 4, !llvm.access.group !15
28645 // CHECK14-NEXT:    [[TMP81:%.*]] = load double*, double** [[B]], align 8, !llvm.access.group !15
28646 // CHECK14-NEXT:    [[TMP82:%.*]] = load i32, i32* [[I130]], align 4, !llvm.access.group !15
28647 // CHECK14-NEXT:    [[IDXPROM136:%.*]] = sext i32 [[TMP82]] to i64
28648 // CHECK14-NEXT:    [[ARRAYIDX137:%.*]] = getelementptr inbounds double, double* [[TMP81]], i64 [[IDXPROM136]]
28649 // CHECK14-NEXT:    [[TMP83:%.*]] = load double, double* [[ARRAYIDX137]], align 8, !llvm.access.group !15
28650 // CHECK14-NEXT:    [[TMP84:%.*]] = load double*, double** [[C]], align 8, !llvm.access.group !15
28651 // CHECK14-NEXT:    [[TMP85:%.*]] = load i32, i32* [[I130]], align 4, !llvm.access.group !15
28652 // CHECK14-NEXT:    [[IDXPROM138:%.*]] = sext i32 [[TMP85]] to i64
28653 // CHECK14-NEXT:    [[ARRAYIDX139:%.*]] = getelementptr inbounds double, double* [[TMP84]], i64 [[IDXPROM138]]
28654 // CHECK14-NEXT:    [[TMP86:%.*]] = load double, double* [[ARRAYIDX139]], align 8, !llvm.access.group !15
28655 // CHECK14-NEXT:    [[ADD140:%.*]] = fadd double [[TMP83]], [[TMP86]]
28656 // CHECK14-NEXT:    [[TMP87:%.*]] = load double*, double** [[A]], align 8, !llvm.access.group !15
28657 // CHECK14-NEXT:    [[TMP88:%.*]] = load i32, i32* [[I130]], align 4, !llvm.access.group !15
28658 // CHECK14-NEXT:    [[IDXPROM141:%.*]] = sext i32 [[TMP88]] to i64
28659 // CHECK14-NEXT:    [[ARRAYIDX142:%.*]] = getelementptr inbounds double, double* [[TMP87]], i64 [[IDXPROM141]]
28660 // CHECK14-NEXT:    store double [[ADD140]], double* [[ARRAYIDX142]], align 8, !llvm.access.group !15
28661 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE143:%.*]]
28662 // CHECK14:       omp.body.continue143:
28663 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC144:%.*]]
28664 // CHECK14:       omp.inner.for.inc144:
28665 // CHECK14-NEXT:    [[TMP89:%.*]] = load i32, i32* [[DOTOMP_IV129]], align 4, !llvm.access.group !15
28666 // CHECK14-NEXT:    [[ADD145:%.*]] = add nsw i32 [[TMP89]], 1
28667 // CHECK14-NEXT:    store i32 [[ADD145]], i32* [[DOTOMP_IV129]], align 4, !llvm.access.group !15
28668 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND131]], !llvm.loop [[LOOP16:![0-9]+]]
28669 // CHECK14:       omp.inner.for.end146:
28670 // CHECK14-NEXT:    [[TMP90:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_119]], align 4
28671 // CHECK14-NEXT:    [[SUB147:%.*]] = sub nsw i32 [[TMP90]], 0
28672 // CHECK14-NEXT:    [[DIV148:%.*]] = sdiv i32 [[SUB147]], 1
28673 // CHECK14-NEXT:    [[MUL149:%.*]] = mul nsw i32 [[DIV148]], 1
28674 // CHECK14-NEXT:    [[ADD150:%.*]] = add nsw i32 0, [[MUL149]]
28675 // CHECK14-NEXT:    store i32 [[ADD150]], i32* [[I130]], align 4
28676 // CHECK14-NEXT:    br label [[SIMD_IF_END151]]
28677 // CHECK14:       simd.if.end151:
28678 // CHECK14-NEXT:    [[TMP91:%.*]] = load i32, i32* [[N]], align 4
28679 // CHECK14-NEXT:    store i32 [[TMP91]], i32* [[DOTCAPTURE_EXPR_153]], align 4
28680 // CHECK14-NEXT:    [[TMP92:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_153]], align 4
28681 // CHECK14-NEXT:    [[SUB155:%.*]] = sub nsw i32 [[TMP92]], 0
28682 // CHECK14-NEXT:    [[DIV156:%.*]] = sdiv i32 [[SUB155]], 1
28683 // CHECK14-NEXT:    [[SUB157:%.*]] = sub nsw i32 [[DIV156]], 1
28684 // CHECK14-NEXT:    store i32 [[SUB157]], i32* [[DOTCAPTURE_EXPR_154]], align 4
28685 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB158]], align 4
28686 // CHECK14-NEXT:    [[TMP93:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_154]], align 4
28687 // CHECK14-NEXT:    store i32 [[TMP93]], i32* [[DOTOMP_UB159]], align 4
28688 // CHECK14-NEXT:    store i32 0, i32* [[I160]], align 4
28689 // CHECK14-NEXT:    [[TMP94:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_153]], align 4
28690 // CHECK14-NEXT:    [[CMP161:%.*]] = icmp slt i32 0, [[TMP94]]
28691 // CHECK14-NEXT:    br i1 [[CMP161]], label [[SIMD_IF_THEN162:%.*]], label [[SIMD_IF_END185:%.*]]
28692 // CHECK14:       simd.if.then162:
28693 // CHECK14-NEXT:    [[TMP95:%.*]] = load i32, i32* [[DOTOMP_LB158]], align 4
28694 // CHECK14-NEXT:    store i32 [[TMP95]], i32* [[DOTOMP_IV163]], align 4
28695 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND165:%.*]]
28696 // CHECK14:       omp.inner.for.cond165:
28697 // CHECK14-NEXT:    [[TMP96:%.*]] = load i32, i32* [[DOTOMP_IV163]], align 4, !llvm.access.group !18
28698 // CHECK14-NEXT:    [[TMP97:%.*]] = load i32, i32* [[DOTOMP_UB159]], align 4, !llvm.access.group !18
28699 // CHECK14-NEXT:    [[CMP166:%.*]] = icmp sle i32 [[TMP96]], [[TMP97]]
28700 // CHECK14-NEXT:    br i1 [[CMP166]], label [[OMP_INNER_FOR_BODY167:%.*]], label [[OMP_INNER_FOR_END180:%.*]]
28701 // CHECK14:       omp.inner.for.body167:
28702 // CHECK14-NEXT:    [[TMP98:%.*]] = load i32, i32* [[DOTOMP_IV163]], align 4, !llvm.access.group !18
28703 // CHECK14-NEXT:    [[MUL168:%.*]] = mul nsw i32 [[TMP98]], 1
28704 // CHECK14-NEXT:    [[ADD169:%.*]] = add nsw i32 0, [[MUL168]]
28705 // CHECK14-NEXT:    store i32 [[ADD169]], i32* [[I164]], align 4, !llvm.access.group !18
28706 // CHECK14-NEXT:    [[TMP99:%.*]] = load double*, double** [[B]], align 8, !llvm.access.group !18
28707 // CHECK14-NEXT:    [[TMP100:%.*]] = load i32, i32* [[I164]], align 4, !llvm.access.group !18
28708 // CHECK14-NEXT:    [[IDXPROM170:%.*]] = sext i32 [[TMP100]] to i64
28709 // CHECK14-NEXT:    [[ARRAYIDX171:%.*]] = getelementptr inbounds double, double* [[TMP99]], i64 [[IDXPROM170]]
28710 // CHECK14-NEXT:    [[TMP101:%.*]] = load double, double* [[ARRAYIDX171]], align 8, !llvm.access.group !18
28711 // CHECK14-NEXT:    [[TMP102:%.*]] = load double*, double** [[C]], align 8, !llvm.access.group !18
28712 // CHECK14-NEXT:    [[TMP103:%.*]] = load i32, i32* [[I164]], align 4, !llvm.access.group !18
28713 // CHECK14-NEXT:    [[IDXPROM172:%.*]] = sext i32 [[TMP103]] to i64
28714 // CHECK14-NEXT:    [[ARRAYIDX173:%.*]] = getelementptr inbounds double, double* [[TMP102]], i64 [[IDXPROM172]]
28715 // CHECK14-NEXT:    [[TMP104:%.*]] = load double, double* [[ARRAYIDX173]], align 8, !llvm.access.group !18
28716 // CHECK14-NEXT:    [[ADD174:%.*]] = fadd double [[TMP101]], [[TMP104]]
28717 // CHECK14-NEXT:    [[TMP105:%.*]] = load double*, double** [[A]], align 8, !llvm.access.group !18
28718 // CHECK14-NEXT:    [[TMP106:%.*]] = load i32, i32* [[I164]], align 4, !llvm.access.group !18
28719 // CHECK14-NEXT:    [[IDXPROM175:%.*]] = sext i32 [[TMP106]] to i64
28720 // CHECK14-NEXT:    [[ARRAYIDX176:%.*]] = getelementptr inbounds double, double* [[TMP105]], i64 [[IDXPROM175]]
28721 // CHECK14-NEXT:    store double [[ADD174]], double* [[ARRAYIDX176]], align 8, !llvm.access.group !18
28722 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE177:%.*]]
28723 // CHECK14:       omp.body.continue177:
28724 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC178:%.*]]
28725 // CHECK14:       omp.inner.for.inc178:
28726 // CHECK14-NEXT:    [[TMP107:%.*]] = load i32, i32* [[DOTOMP_IV163]], align 4, !llvm.access.group !18
28727 // CHECK14-NEXT:    [[ADD179:%.*]] = add nsw i32 [[TMP107]], 1
28728 // CHECK14-NEXT:    store i32 [[ADD179]], i32* [[DOTOMP_IV163]], align 4, !llvm.access.group !18
28729 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND165]], !llvm.loop [[LOOP19:![0-9]+]]
28730 // CHECK14:       omp.inner.for.end180:
28731 // CHECK14-NEXT:    [[TMP108:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_153]], align 4
28732 // CHECK14-NEXT:    [[SUB181:%.*]] = sub nsw i32 [[TMP108]], 0
28733 // CHECK14-NEXT:    [[DIV182:%.*]] = sdiv i32 [[SUB181]], 1
28734 // CHECK14-NEXT:    [[MUL183:%.*]] = mul nsw i32 [[DIV182]], 1
28735 // CHECK14-NEXT:    [[ADD184:%.*]] = add nsw i32 0, [[MUL183]]
28736 // CHECK14-NEXT:    store i32 [[ADD184]], i32* [[I164]], align 4
28737 // CHECK14-NEXT:    br label [[SIMD_IF_END185]]
28738 // CHECK14:       simd.if.end185:
28739 // CHECK14-NEXT:    [[TMP109:%.*]] = load i32, i32* [[CH]], align 4
28740 // CHECK14-NEXT:    store i32 [[TMP109]], i32* [[DOTCAPTURE_EXPR_186]], align 4
28741 // CHECK14-NEXT:    [[TMP110:%.*]] = load i32, i32* [[N]], align 4
28742 // CHECK14-NEXT:    store i32 [[TMP110]], i32* [[DOTCAPTURE_EXPR_188]], align 4
28743 // CHECK14-NEXT:    [[TMP111:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_188]], align 4
28744 // CHECK14-NEXT:    [[SUB190:%.*]] = sub nsw i32 [[TMP111]], 0
28745 // CHECK14-NEXT:    [[DIV191:%.*]] = sdiv i32 [[SUB190]], 1
28746 // CHECK14-NEXT:    [[SUB192:%.*]] = sub nsw i32 [[DIV191]], 1
28747 // CHECK14-NEXT:    store i32 [[SUB192]], i32* [[DOTCAPTURE_EXPR_189]], align 4
28748 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB193]], align 4
28749 // CHECK14-NEXT:    [[TMP112:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_189]], align 4
28750 // CHECK14-NEXT:    store i32 [[TMP112]], i32* [[DOTOMP_UB194]], align 4
28751 // CHECK14-NEXT:    store i32 0, i32* [[I195]], align 4
28752 // CHECK14-NEXT:    [[TMP113:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_188]], align 4
28753 // CHECK14-NEXT:    [[CMP196:%.*]] = icmp slt i32 0, [[TMP113]]
28754 // CHECK14-NEXT:    br i1 [[CMP196]], label [[SIMD_IF_THEN197:%.*]], label [[SIMD_IF_END220:%.*]]
28755 // CHECK14:       simd.if.then197:
28756 // CHECK14-NEXT:    [[TMP114:%.*]] = load i32, i32* [[DOTOMP_LB193]], align 4
28757 // CHECK14-NEXT:    store i32 [[TMP114]], i32* [[DOTOMP_IV198]], align 4
28758 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND200:%.*]]
28759 // CHECK14:       omp.inner.for.cond200:
28760 // CHECK14-NEXT:    [[TMP115:%.*]] = load i32, i32* [[DOTOMP_IV198]], align 4, !llvm.access.group !21
28761 // CHECK14-NEXT:    [[TMP116:%.*]] = load i32, i32* [[DOTOMP_UB194]], align 4, !llvm.access.group !21
28762 // CHECK14-NEXT:    [[CMP201:%.*]] = icmp sle i32 [[TMP115]], [[TMP116]]
28763 // CHECK14-NEXT:    br i1 [[CMP201]], label [[OMP_INNER_FOR_BODY202:%.*]], label [[OMP_INNER_FOR_END215:%.*]]
28764 // CHECK14:       omp.inner.for.body202:
28765 // CHECK14-NEXT:    [[TMP117:%.*]] = load i32, i32* [[DOTOMP_IV198]], align 4, !llvm.access.group !21
28766 // CHECK14-NEXT:    [[MUL203:%.*]] = mul nsw i32 [[TMP117]], 1
28767 // CHECK14-NEXT:    [[ADD204:%.*]] = add nsw i32 0, [[MUL203]]
28768 // CHECK14-NEXT:    store i32 [[ADD204]], i32* [[I199]], align 4, !llvm.access.group !21
28769 // CHECK14-NEXT:    [[TMP118:%.*]] = load double*, double** [[B]], align 8, !llvm.access.group !21
28770 // CHECK14-NEXT:    [[TMP119:%.*]] = load i32, i32* [[I199]], align 4, !llvm.access.group !21
28771 // CHECK14-NEXT:    [[IDXPROM205:%.*]] = sext i32 [[TMP119]] to i64
28772 // CHECK14-NEXT:    [[ARRAYIDX206:%.*]] = getelementptr inbounds double, double* [[TMP118]], i64 [[IDXPROM205]]
28773 // CHECK14-NEXT:    [[TMP120:%.*]] = load double, double* [[ARRAYIDX206]], align 8, !llvm.access.group !21
28774 // CHECK14-NEXT:    [[TMP121:%.*]] = load double*, double** [[C]], align 8, !llvm.access.group !21
28775 // CHECK14-NEXT:    [[TMP122:%.*]] = load i32, i32* [[I199]], align 4, !llvm.access.group !21
28776 // CHECK14-NEXT:    [[IDXPROM207:%.*]] = sext i32 [[TMP122]] to i64
28777 // CHECK14-NEXT:    [[ARRAYIDX208:%.*]] = getelementptr inbounds double, double* [[TMP121]], i64 [[IDXPROM207]]
28778 // CHECK14-NEXT:    [[TMP123:%.*]] = load double, double* [[ARRAYIDX208]], align 8, !llvm.access.group !21
28779 // CHECK14-NEXT:    [[ADD209:%.*]] = fadd double [[TMP120]], [[TMP123]]
28780 // CHECK14-NEXT:    [[TMP124:%.*]] = load double*, double** [[A]], align 8, !llvm.access.group !21
28781 // CHECK14-NEXT:    [[TMP125:%.*]] = load i32, i32* [[I199]], align 4, !llvm.access.group !21
28782 // CHECK14-NEXT:    [[IDXPROM210:%.*]] = sext i32 [[TMP125]] to i64
28783 // CHECK14-NEXT:    [[ARRAYIDX211:%.*]] = getelementptr inbounds double, double* [[TMP124]], i64 [[IDXPROM210]]
28784 // CHECK14-NEXT:    store double [[ADD209]], double* [[ARRAYIDX211]], align 8, !llvm.access.group !21
28785 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE212:%.*]]
28786 // CHECK14:       omp.body.continue212:
28787 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC213:%.*]]
28788 // CHECK14:       omp.inner.for.inc213:
28789 // CHECK14-NEXT:    [[TMP126:%.*]] = load i32, i32* [[DOTOMP_IV198]], align 4, !llvm.access.group !21
28790 // CHECK14-NEXT:    [[ADD214:%.*]] = add nsw i32 [[TMP126]], 1
28791 // CHECK14-NEXT:    store i32 [[ADD214]], i32* [[DOTOMP_IV198]], align 4, !llvm.access.group !21
28792 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND200]], !llvm.loop [[LOOP22:![0-9]+]]
28793 // CHECK14:       omp.inner.for.end215:
28794 // CHECK14-NEXT:    [[TMP127:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_188]], align 4
28795 // CHECK14-NEXT:    [[SUB216:%.*]] = sub nsw i32 [[TMP127]], 0
28796 // CHECK14-NEXT:    [[DIV217:%.*]] = sdiv i32 [[SUB216]], 1
28797 // CHECK14-NEXT:    [[MUL218:%.*]] = mul nsw i32 [[DIV217]], 1
28798 // CHECK14-NEXT:    [[ADD219:%.*]] = add nsw i32 0, [[MUL218]]
28799 // CHECK14-NEXT:    store i32 [[ADD219]], i32* [[I199]], align 4
28800 // CHECK14-NEXT:    br label [[SIMD_IF_END220]]
28801 // CHECK14:       simd.if.end220:
28802 // CHECK14-NEXT:    [[CALL:%.*]] = call signext i32 @_Z5tmainIiET_v()
28803 // CHECK14-NEXT:    ret i32 [[CALL]]
28804 //
28805 //
28806 // CHECK14-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
28807 // CHECK14-SAME: () #[[ATTR1:[0-9]+]] comdat {
28808 // CHECK14-NEXT:  entry:
28809 // CHECK14-NEXT:    [[A:%.*]] = alloca i32*, align 8
28810 // CHECK14-NEXT:    [[B:%.*]] = alloca i32*, align 8
28811 // CHECK14-NEXT:    [[C:%.*]] = alloca i32*, align 8
28812 // CHECK14-NEXT:    [[N:%.*]] = alloca i32, align 4
28813 // CHECK14-NEXT:    [[CH:%.*]] = alloca i32, align 4
28814 // CHECK14-NEXT:    [[TMP:%.*]] = alloca i32, align 4
28815 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
28816 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
28817 // CHECK14-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
28818 // CHECK14-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
28819 // CHECK14-NEXT:    [[I:%.*]] = alloca i32, align 4
28820 // CHECK14-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
28821 // CHECK14-NEXT:    [[I3:%.*]] = alloca i32, align 4
28822 // CHECK14-NEXT:    [[_TMP15:%.*]] = alloca i32, align 4
28823 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_16:%.*]] = alloca i32, align 4
28824 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4
28825 // CHECK14-NEXT:    [[DOTOMP_LB21:%.*]] = alloca i32, align 4
28826 // CHECK14-NEXT:    [[DOTOMP_UB22:%.*]] = alloca i32, align 4
28827 // CHECK14-NEXT:    [[I23:%.*]] = alloca i32, align 4
28828 // CHECK14-NEXT:    [[DOTOMP_IV26:%.*]] = alloca i32, align 4
28829 // CHECK14-NEXT:    [[I27:%.*]] = alloca i32, align 4
28830 // CHECK14-NEXT:    [[_TMP49:%.*]] = alloca i32, align 4
28831 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_50:%.*]] = alloca i32, align 4
28832 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_51:%.*]] = alloca i32, align 4
28833 // CHECK14-NEXT:    [[DOTOMP_LB55:%.*]] = alloca i32, align 4
28834 // CHECK14-NEXT:    [[DOTOMP_UB56:%.*]] = alloca i32, align 4
28835 // CHECK14-NEXT:    [[I57:%.*]] = alloca i32, align 4
28836 // CHECK14-NEXT:    [[DOTOMP_IV60:%.*]] = alloca i32, align 4
28837 // CHECK14-NEXT:    [[I61:%.*]] = alloca i32, align 4
28838 // CHECK14-NEXT:    [[_TMP83:%.*]] = alloca i32, align 4
28839 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_84:%.*]] = alloca i32, align 4
28840 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_85:%.*]] = alloca i32, align 4
28841 // CHECK14-NEXT:    [[DOTOMP_LB89:%.*]] = alloca i32, align 4
28842 // CHECK14-NEXT:    [[DOTOMP_UB90:%.*]] = alloca i32, align 4
28843 // CHECK14-NEXT:    [[I91:%.*]] = alloca i32, align 4
28844 // CHECK14-NEXT:    [[DOTOMP_IV94:%.*]] = alloca i32, align 4
28845 // CHECK14-NEXT:    [[I95:%.*]] = alloca i32, align 4
28846 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_117:%.*]] = alloca i32, align 4
28847 // CHECK14-NEXT:    [[_TMP118:%.*]] = alloca i32, align 4
28848 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_119:%.*]] = alloca i32, align 4
28849 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_120:%.*]] = alloca i32, align 4
28850 // CHECK14-NEXT:    [[DOTOMP_LB124:%.*]] = alloca i32, align 4
28851 // CHECK14-NEXT:    [[DOTOMP_UB125:%.*]] = alloca i32, align 4
28852 // CHECK14-NEXT:    [[I126:%.*]] = alloca i32, align 4
28853 // CHECK14-NEXT:    [[DOTOMP_IV129:%.*]] = alloca i32, align 4
28854 // CHECK14-NEXT:    [[I130:%.*]] = alloca i32, align 4
28855 // CHECK14-NEXT:    [[_TMP152:%.*]] = alloca i32, align 4
28856 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_153:%.*]] = alloca i32, align 4
28857 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_154:%.*]] = alloca i32, align 4
28858 // CHECK14-NEXT:    [[DOTOMP_LB158:%.*]] = alloca i32, align 4
28859 // CHECK14-NEXT:    [[DOTOMP_UB159:%.*]] = alloca i32, align 4
28860 // CHECK14-NEXT:    [[I160:%.*]] = alloca i32, align 4
28861 // CHECK14-NEXT:    [[DOTOMP_IV163:%.*]] = alloca i32, align 4
28862 // CHECK14-NEXT:    [[I164:%.*]] = alloca i32, align 4
28863 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_186:%.*]] = alloca i32, align 4
28864 // CHECK14-NEXT:    [[_TMP187:%.*]] = alloca i32, align 4
28865 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_188:%.*]] = alloca i32, align 4
28866 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_189:%.*]] = alloca i32, align 4
28867 // CHECK14-NEXT:    [[DOTOMP_LB193:%.*]] = alloca i32, align 4
28868 // CHECK14-NEXT:    [[DOTOMP_UB194:%.*]] = alloca i32, align 4
28869 // CHECK14-NEXT:    [[I195:%.*]] = alloca i32, align 4
28870 // CHECK14-NEXT:    [[DOTOMP_IV198:%.*]] = alloca i32, align 4
28871 // CHECK14-NEXT:    [[I199:%.*]] = alloca i32, align 4
28872 // CHECK14-NEXT:    store i32 10000, i32* [[N]], align 4
28873 // CHECK14-NEXT:    store i32 100, i32* [[CH]], align 4
28874 // CHECK14-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
28875 // CHECK14-NEXT:    store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
28876 // CHECK14-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
28877 // CHECK14-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP1]], 0
28878 // CHECK14-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
28879 // CHECK14-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
28880 // CHECK14-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
28881 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
28882 // CHECK14-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
28883 // CHECK14-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
28884 // CHECK14-NEXT:    store i32 0, i32* [[I]], align 4
28885 // CHECK14-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
28886 // CHECK14-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
28887 // CHECK14-NEXT:    br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
28888 // CHECK14:       simd.if.then:
28889 // CHECK14-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
28890 // CHECK14-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
28891 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
28892 // CHECK14:       omp.inner.for.cond:
28893 // CHECK14-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
28894 // CHECK14-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !24
28895 // CHECK14-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
28896 // CHECK14-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
28897 // CHECK14:       omp.inner.for.body:
28898 // CHECK14-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
28899 // CHECK14-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
28900 // CHECK14-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
28901 // CHECK14-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !24
28902 // CHECK14-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[B]], align 8, !llvm.access.group !24
28903 // CHECK14-NEXT:    [[TMP9:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !24
28904 // CHECK14-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP9]] to i64
28905 // CHECK14-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP8]], i64 [[IDXPROM]]
28906 // CHECK14-NEXT:    [[TMP10:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !24
28907 // CHECK14-NEXT:    [[TMP11:%.*]] = load i32*, i32** [[C]], align 8, !llvm.access.group !24
28908 // CHECK14-NEXT:    [[TMP12:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !24
28909 // CHECK14-NEXT:    [[IDXPROM5:%.*]] = sext i32 [[TMP12]] to i64
28910 // CHECK14-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[TMP11]], i64 [[IDXPROM5]]
28911 // CHECK14-NEXT:    [[TMP13:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4, !llvm.access.group !24
28912 // CHECK14-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP10]], [[TMP13]]
28913 // CHECK14-NEXT:    [[TMP14:%.*]] = load i32*, i32** [[A]], align 8, !llvm.access.group !24
28914 // CHECK14-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !24
28915 // CHECK14-NEXT:    [[IDXPROM8:%.*]] = sext i32 [[TMP15]] to i64
28916 // CHECK14-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, i32* [[TMP14]], i64 [[IDXPROM8]]
28917 // CHECK14-NEXT:    store i32 [[ADD7]], i32* [[ARRAYIDX9]], align 4, !llvm.access.group !24
28918 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
28919 // CHECK14:       omp.body.continue:
28920 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
28921 // CHECK14:       omp.inner.for.inc:
28922 // CHECK14-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
28923 // CHECK14-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP16]], 1
28924 // CHECK14-NEXT:    store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
28925 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
28926 // CHECK14:       omp.inner.for.end:
28927 // CHECK14-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
28928 // CHECK14-NEXT:    [[SUB11:%.*]] = sub nsw i32 [[TMP17]], 0
28929 // CHECK14-NEXT:    [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1
28930 // CHECK14-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[DIV12]], 1
28931 // CHECK14-NEXT:    [[ADD14:%.*]] = add nsw i32 0, [[MUL13]]
28932 // CHECK14-NEXT:    store i32 [[ADD14]], i32* [[I3]], align 4
28933 // CHECK14-NEXT:    br label [[SIMD_IF_END]]
28934 // CHECK14:       simd.if.end:
28935 // CHECK14-NEXT:    [[TMP18:%.*]] = load i32, i32* [[N]], align 4
28936 // CHECK14-NEXT:    store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR_16]], align 4
28937 // CHECK14-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4
28938 // CHECK14-NEXT:    [[SUB18:%.*]] = sub nsw i32 [[TMP19]], 0
28939 // CHECK14-NEXT:    [[DIV19:%.*]] = sdiv i32 [[SUB18]], 1
28940 // CHECK14-NEXT:    [[SUB20:%.*]] = sub nsw i32 [[DIV19]], 1
28941 // CHECK14-NEXT:    store i32 [[SUB20]], i32* [[DOTCAPTURE_EXPR_17]], align 4
28942 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB21]], align 4
28943 // CHECK14-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4
28944 // CHECK14-NEXT:    store i32 [[TMP20]], i32* [[DOTOMP_UB22]], align 4
28945 // CHECK14-NEXT:    store i32 0, i32* [[I23]], align 4
28946 // CHECK14-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4
28947 // CHECK14-NEXT:    [[CMP24:%.*]] = icmp slt i32 0, [[TMP21]]
28948 // CHECK14-NEXT:    br i1 [[CMP24]], label [[SIMD_IF_THEN25:%.*]], label [[SIMD_IF_END48:%.*]]
28949 // CHECK14:       simd.if.then25:
28950 // CHECK14-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_LB21]], align 4
28951 // CHECK14-NEXT:    store i32 [[TMP22]], i32* [[DOTOMP_IV26]], align 4
28952 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND28:%.*]]
28953 // CHECK14:       omp.inner.for.cond28:
28954 // CHECK14-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV26]], align 4, !llvm.access.group !27
28955 // CHECK14-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_UB22]], align 4, !llvm.access.group !27
28956 // CHECK14-NEXT:    [[CMP29:%.*]] = icmp sle i32 [[TMP23]], [[TMP24]]
28957 // CHECK14-NEXT:    br i1 [[CMP29]], label [[OMP_INNER_FOR_BODY30:%.*]], label [[OMP_INNER_FOR_END43:%.*]]
28958 // CHECK14:       omp.inner.for.body30:
28959 // CHECK14-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV26]], align 4, !llvm.access.group !27
28960 // CHECK14-NEXT:    [[MUL31:%.*]] = mul nsw i32 [[TMP25]], 1
28961 // CHECK14-NEXT:    [[ADD32:%.*]] = add nsw i32 0, [[MUL31]]
28962 // CHECK14-NEXT:    store i32 [[ADD32]], i32* [[I27]], align 4, !llvm.access.group !27
28963 // CHECK14-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[B]], align 8, !llvm.access.group !27
28964 // CHECK14-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I27]], align 4, !llvm.access.group !27
28965 // CHECK14-NEXT:    [[IDXPROM33:%.*]] = sext i32 [[TMP27]] to i64
28966 // CHECK14-NEXT:    [[ARRAYIDX34:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i64 [[IDXPROM33]]
28967 // CHECK14-NEXT:    [[TMP28:%.*]] = load i32, i32* [[ARRAYIDX34]], align 4, !llvm.access.group !27
28968 // CHECK14-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[C]], align 8, !llvm.access.group !27
28969 // CHECK14-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I27]], align 4, !llvm.access.group !27
28970 // CHECK14-NEXT:    [[IDXPROM35:%.*]] = sext i32 [[TMP30]] to i64
28971 // CHECK14-NEXT:    [[ARRAYIDX36:%.*]] = getelementptr inbounds i32, i32* [[TMP29]], i64 [[IDXPROM35]]
28972 // CHECK14-NEXT:    [[TMP31:%.*]] = load i32, i32* [[ARRAYIDX36]], align 4, !llvm.access.group !27
28973 // CHECK14-NEXT:    [[ADD37:%.*]] = add nsw i32 [[TMP28]], [[TMP31]]
28974 // CHECK14-NEXT:    [[TMP32:%.*]] = load i32*, i32** [[A]], align 8, !llvm.access.group !27
28975 // CHECK14-NEXT:    [[TMP33:%.*]] = load i32, i32* [[I27]], align 4, !llvm.access.group !27
28976 // CHECK14-NEXT:    [[IDXPROM38:%.*]] = sext i32 [[TMP33]] to i64
28977 // CHECK14-NEXT:    [[ARRAYIDX39:%.*]] = getelementptr inbounds i32, i32* [[TMP32]], i64 [[IDXPROM38]]
28978 // CHECK14-NEXT:    store i32 [[ADD37]], i32* [[ARRAYIDX39]], align 4, !llvm.access.group !27
28979 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE40:%.*]]
28980 // CHECK14:       omp.body.continue40:
28981 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC41:%.*]]
28982 // CHECK14:       omp.inner.for.inc41:
28983 // CHECK14-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV26]], align 4, !llvm.access.group !27
28984 // CHECK14-NEXT:    [[ADD42:%.*]] = add nsw i32 [[TMP34]], 1
28985 // CHECK14-NEXT:    store i32 [[ADD42]], i32* [[DOTOMP_IV26]], align 4, !llvm.access.group !27
28986 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND28]], !llvm.loop [[LOOP28:![0-9]+]]
28987 // CHECK14:       omp.inner.for.end43:
28988 // CHECK14-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4
28989 // CHECK14-NEXT:    [[SUB44:%.*]] = sub nsw i32 [[TMP35]], 0
28990 // CHECK14-NEXT:    [[DIV45:%.*]] = sdiv i32 [[SUB44]], 1
28991 // CHECK14-NEXT:    [[MUL46:%.*]] = mul nsw i32 [[DIV45]], 1
28992 // CHECK14-NEXT:    [[ADD47:%.*]] = add nsw i32 0, [[MUL46]]
28993 // CHECK14-NEXT:    store i32 [[ADD47]], i32* [[I27]], align 4
28994 // CHECK14-NEXT:    br label [[SIMD_IF_END48]]
28995 // CHECK14:       simd.if.end48:
28996 // CHECK14-NEXT:    [[TMP36:%.*]] = load i32, i32* [[N]], align 4
28997 // CHECK14-NEXT:    store i32 [[TMP36]], i32* [[DOTCAPTURE_EXPR_50]], align 4
28998 // CHECK14-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4
28999 // CHECK14-NEXT:    [[SUB52:%.*]] = sub nsw i32 [[TMP37]], 0
29000 // CHECK14-NEXT:    [[DIV53:%.*]] = sdiv i32 [[SUB52]], 1
29001 // CHECK14-NEXT:    [[SUB54:%.*]] = sub nsw i32 [[DIV53]], 1
29002 // CHECK14-NEXT:    store i32 [[SUB54]], i32* [[DOTCAPTURE_EXPR_51]], align 4
29003 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB55]], align 4
29004 // CHECK14-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_51]], align 4
29005 // CHECK14-NEXT:    store i32 [[TMP38]], i32* [[DOTOMP_UB56]], align 4
29006 // CHECK14-NEXT:    store i32 0, i32* [[I57]], align 4
29007 // CHECK14-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4
29008 // CHECK14-NEXT:    [[CMP58:%.*]] = icmp slt i32 0, [[TMP39]]
29009 // CHECK14-NEXT:    br i1 [[CMP58]], label [[SIMD_IF_THEN59:%.*]], label [[SIMD_IF_END82:%.*]]
29010 // CHECK14:       simd.if.then59:
29011 // CHECK14-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_LB55]], align 4
29012 // CHECK14-NEXT:    store i32 [[TMP40]], i32* [[DOTOMP_IV60]], align 4
29013 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND62:%.*]]
29014 // CHECK14:       omp.inner.for.cond62:
29015 // CHECK14-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_IV60]], align 4, !llvm.access.group !30
29016 // CHECK14-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_UB56]], align 4, !llvm.access.group !30
29017 // CHECK14-NEXT:    [[CMP63:%.*]] = icmp sle i32 [[TMP41]], [[TMP42]]
29018 // CHECK14-NEXT:    br i1 [[CMP63]], label [[OMP_INNER_FOR_BODY64:%.*]], label [[OMP_INNER_FOR_END77:%.*]]
29019 // CHECK14:       omp.inner.for.body64:
29020 // CHECK14-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV60]], align 4, !llvm.access.group !30
29021 // CHECK14-NEXT:    [[MUL65:%.*]] = mul nsw i32 [[TMP43]], 1
29022 // CHECK14-NEXT:    [[ADD66:%.*]] = add nsw i32 0, [[MUL65]]
29023 // CHECK14-NEXT:    store i32 [[ADD66]], i32* [[I61]], align 4, !llvm.access.group !30
29024 // CHECK14-NEXT:    [[TMP44:%.*]] = load i32*, i32** [[B]], align 8, !llvm.access.group !30
29025 // CHECK14-NEXT:    [[TMP45:%.*]] = load i32, i32* [[I61]], align 4, !llvm.access.group !30
29026 // CHECK14-NEXT:    [[IDXPROM67:%.*]] = sext i32 [[TMP45]] to i64
29027 // CHECK14-NEXT:    [[ARRAYIDX68:%.*]] = getelementptr inbounds i32, i32* [[TMP44]], i64 [[IDXPROM67]]
29028 // CHECK14-NEXT:    [[TMP46:%.*]] = load i32, i32* [[ARRAYIDX68]], align 4, !llvm.access.group !30
29029 // CHECK14-NEXT:    [[TMP47:%.*]] = load i32*, i32** [[C]], align 8, !llvm.access.group !30
29030 // CHECK14-NEXT:    [[TMP48:%.*]] = load i32, i32* [[I61]], align 4, !llvm.access.group !30
29031 // CHECK14-NEXT:    [[IDXPROM69:%.*]] = sext i32 [[TMP48]] to i64
29032 // CHECK14-NEXT:    [[ARRAYIDX70:%.*]] = getelementptr inbounds i32, i32* [[TMP47]], i64 [[IDXPROM69]]
29033 // CHECK14-NEXT:    [[TMP49:%.*]] = load i32, i32* [[ARRAYIDX70]], align 4, !llvm.access.group !30
29034 // CHECK14-NEXT:    [[ADD71:%.*]] = add nsw i32 [[TMP46]], [[TMP49]]
29035 // CHECK14-NEXT:    [[TMP50:%.*]] = load i32*, i32** [[A]], align 8, !llvm.access.group !30
29036 // CHECK14-NEXT:    [[TMP51:%.*]] = load i32, i32* [[I61]], align 4, !llvm.access.group !30
29037 // CHECK14-NEXT:    [[IDXPROM72:%.*]] = sext i32 [[TMP51]] to i64
29038 // CHECK14-NEXT:    [[ARRAYIDX73:%.*]] = getelementptr inbounds i32, i32* [[TMP50]], i64 [[IDXPROM72]]
29039 // CHECK14-NEXT:    store i32 [[ADD71]], i32* [[ARRAYIDX73]], align 4, !llvm.access.group !30
29040 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE74:%.*]]
29041 // CHECK14:       omp.body.continue74:
29042 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC75:%.*]]
29043 // CHECK14:       omp.inner.for.inc75:
29044 // CHECK14-NEXT:    [[TMP52:%.*]] = load i32, i32* [[DOTOMP_IV60]], align 4, !llvm.access.group !30
29045 // CHECK14-NEXT:    [[ADD76:%.*]] = add nsw i32 [[TMP52]], 1
29046 // CHECK14-NEXT:    store i32 [[ADD76]], i32* [[DOTOMP_IV60]], align 4, !llvm.access.group !30
29047 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND62]], !llvm.loop [[LOOP31:![0-9]+]]
29048 // CHECK14:       omp.inner.for.end77:
29049 // CHECK14-NEXT:    [[TMP53:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4
29050 // CHECK14-NEXT:    [[SUB78:%.*]] = sub nsw i32 [[TMP53]], 0
29051 // CHECK14-NEXT:    [[DIV79:%.*]] = sdiv i32 [[SUB78]], 1
29052 // CHECK14-NEXT:    [[MUL80:%.*]] = mul nsw i32 [[DIV79]], 1
29053 // CHECK14-NEXT:    [[ADD81:%.*]] = add nsw i32 0, [[MUL80]]
29054 // CHECK14-NEXT:    store i32 [[ADD81]], i32* [[I61]], align 4
29055 // CHECK14-NEXT:    br label [[SIMD_IF_END82]]
29056 // CHECK14:       simd.if.end82:
29057 // CHECK14-NEXT:    [[TMP54:%.*]] = load i32, i32* [[N]], align 4
29058 // CHECK14-NEXT:    store i32 [[TMP54]], i32* [[DOTCAPTURE_EXPR_84]], align 4
29059 // CHECK14-NEXT:    [[TMP55:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_84]], align 4
29060 // CHECK14-NEXT:    [[SUB86:%.*]] = sub nsw i32 [[TMP55]], 0
29061 // CHECK14-NEXT:    [[DIV87:%.*]] = sdiv i32 [[SUB86]], 1
29062 // CHECK14-NEXT:    [[SUB88:%.*]] = sub nsw i32 [[DIV87]], 1
29063 // CHECK14-NEXT:    store i32 [[SUB88]], i32* [[DOTCAPTURE_EXPR_85]], align 4
29064 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB89]], align 4
29065 // CHECK14-NEXT:    [[TMP56:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_85]], align 4
29066 // CHECK14-NEXT:    store i32 [[TMP56]], i32* [[DOTOMP_UB90]], align 4
29067 // CHECK14-NEXT:    store i32 0, i32* [[I91]], align 4
29068 // CHECK14-NEXT:    [[TMP57:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_84]], align 4
29069 // CHECK14-NEXT:    [[CMP92:%.*]] = icmp slt i32 0, [[TMP57]]
29070 // CHECK14-NEXT:    br i1 [[CMP92]], label [[SIMD_IF_THEN93:%.*]], label [[SIMD_IF_END116:%.*]]
29071 // CHECK14:       simd.if.then93:
29072 // CHECK14-NEXT:    [[TMP58:%.*]] = load i32, i32* [[DOTOMP_LB89]], align 4
29073 // CHECK14-NEXT:    store i32 [[TMP58]], i32* [[DOTOMP_IV94]], align 4
29074 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND96:%.*]]
29075 // CHECK14:       omp.inner.for.cond96:
29076 // CHECK14-NEXT:    [[TMP59:%.*]] = load i32, i32* [[DOTOMP_IV94]], align 4, !llvm.access.group !33
29077 // CHECK14-NEXT:    [[TMP60:%.*]] = load i32, i32* [[DOTOMP_UB90]], align 4, !llvm.access.group !33
29078 // CHECK14-NEXT:    [[CMP97:%.*]] = icmp sle i32 [[TMP59]], [[TMP60]]
29079 // CHECK14-NEXT:    br i1 [[CMP97]], label [[OMP_INNER_FOR_BODY98:%.*]], label [[OMP_INNER_FOR_END111:%.*]]
29080 // CHECK14:       omp.inner.for.body98:
29081 // CHECK14-NEXT:    [[TMP61:%.*]] = load i32, i32* [[DOTOMP_IV94]], align 4, !llvm.access.group !33
29082 // CHECK14-NEXT:    [[MUL99:%.*]] = mul nsw i32 [[TMP61]], 1
29083 // CHECK14-NEXT:    [[ADD100:%.*]] = add nsw i32 0, [[MUL99]]
29084 // CHECK14-NEXT:    store i32 [[ADD100]], i32* [[I95]], align 4, !llvm.access.group !33
29085 // CHECK14-NEXT:    [[TMP62:%.*]] = load i32*, i32** [[B]], align 8, !llvm.access.group !33
29086 // CHECK14-NEXT:    [[TMP63:%.*]] = load i32, i32* [[I95]], align 4, !llvm.access.group !33
29087 // CHECK14-NEXT:    [[IDXPROM101:%.*]] = sext i32 [[TMP63]] to i64
29088 // CHECK14-NEXT:    [[ARRAYIDX102:%.*]] = getelementptr inbounds i32, i32* [[TMP62]], i64 [[IDXPROM101]]
29089 // CHECK14-NEXT:    [[TMP64:%.*]] = load i32, i32* [[ARRAYIDX102]], align 4, !llvm.access.group !33
29090 // CHECK14-NEXT:    [[TMP65:%.*]] = load i32*, i32** [[C]], align 8, !llvm.access.group !33
29091 // CHECK14-NEXT:    [[TMP66:%.*]] = load i32, i32* [[I95]], align 4, !llvm.access.group !33
29092 // CHECK14-NEXT:    [[IDXPROM103:%.*]] = sext i32 [[TMP66]] to i64
29093 // CHECK14-NEXT:    [[ARRAYIDX104:%.*]] = getelementptr inbounds i32, i32* [[TMP65]], i64 [[IDXPROM103]]
29094 // CHECK14-NEXT:    [[TMP67:%.*]] = load i32, i32* [[ARRAYIDX104]], align 4, !llvm.access.group !33
29095 // CHECK14-NEXT:    [[ADD105:%.*]] = add nsw i32 [[TMP64]], [[TMP67]]
29096 // CHECK14-NEXT:    [[TMP68:%.*]] = load i32*, i32** [[A]], align 8, !llvm.access.group !33
29097 // CHECK14-NEXT:    [[TMP69:%.*]] = load i32, i32* [[I95]], align 4, !llvm.access.group !33
29098 // CHECK14-NEXT:    [[IDXPROM106:%.*]] = sext i32 [[TMP69]] to i64
29099 // CHECK14-NEXT:    [[ARRAYIDX107:%.*]] = getelementptr inbounds i32, i32* [[TMP68]], i64 [[IDXPROM106]]
29100 // CHECK14-NEXT:    store i32 [[ADD105]], i32* [[ARRAYIDX107]], align 4, !llvm.access.group !33
29101 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE108:%.*]]
29102 // CHECK14:       omp.body.continue108:
29103 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC109:%.*]]
29104 // CHECK14:       omp.inner.for.inc109:
29105 // CHECK14-NEXT:    [[TMP70:%.*]] = load i32, i32* [[DOTOMP_IV94]], align 4, !llvm.access.group !33
29106 // CHECK14-NEXT:    [[ADD110:%.*]] = add nsw i32 [[TMP70]], 1
29107 // CHECK14-NEXT:    store i32 [[ADD110]], i32* [[DOTOMP_IV94]], align 4, !llvm.access.group !33
29108 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND96]], !llvm.loop [[LOOP34:![0-9]+]]
29109 // CHECK14:       omp.inner.for.end111:
29110 // CHECK14-NEXT:    [[TMP71:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_84]], align 4
29111 // CHECK14-NEXT:    [[SUB112:%.*]] = sub nsw i32 [[TMP71]], 0
29112 // CHECK14-NEXT:    [[DIV113:%.*]] = sdiv i32 [[SUB112]], 1
29113 // CHECK14-NEXT:    [[MUL114:%.*]] = mul nsw i32 [[DIV113]], 1
29114 // CHECK14-NEXT:    [[ADD115:%.*]] = add nsw i32 0, [[MUL114]]
29115 // CHECK14-NEXT:    store i32 [[ADD115]], i32* [[I95]], align 4
29116 // CHECK14-NEXT:    br label [[SIMD_IF_END116]]
29117 // CHECK14:       simd.if.end116:
29118 // CHECK14-NEXT:    [[TMP72:%.*]] = load i32, i32* [[CH]], align 4
29119 // CHECK14-NEXT:    store i32 [[TMP72]], i32* [[DOTCAPTURE_EXPR_117]], align 4
29120 // CHECK14-NEXT:    [[TMP73:%.*]] = load i32, i32* [[N]], align 4
29121 // CHECK14-NEXT:    store i32 [[TMP73]], i32* [[DOTCAPTURE_EXPR_119]], align 4
29122 // CHECK14-NEXT:    [[TMP74:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_119]], align 4
29123 // CHECK14-NEXT:    [[SUB121:%.*]] = sub nsw i32 [[TMP74]], 0
29124 // CHECK14-NEXT:    [[DIV122:%.*]] = sdiv i32 [[SUB121]], 1
29125 // CHECK14-NEXT:    [[SUB123:%.*]] = sub nsw i32 [[DIV122]], 1
29126 // CHECK14-NEXT:    store i32 [[SUB123]], i32* [[DOTCAPTURE_EXPR_120]], align 4
29127 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB124]], align 4
29128 // CHECK14-NEXT:    [[TMP75:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_120]], align 4
29129 // CHECK14-NEXT:    store i32 [[TMP75]], i32* [[DOTOMP_UB125]], align 4
29130 // CHECK14-NEXT:    store i32 0, i32* [[I126]], align 4
29131 // CHECK14-NEXT:    [[TMP76:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_119]], align 4
29132 // CHECK14-NEXT:    [[CMP127:%.*]] = icmp slt i32 0, [[TMP76]]
29133 // CHECK14-NEXT:    br i1 [[CMP127]], label [[SIMD_IF_THEN128:%.*]], label [[SIMD_IF_END151:%.*]]
29134 // CHECK14:       simd.if.then128:
29135 // CHECK14-NEXT:    [[TMP77:%.*]] = load i32, i32* [[DOTOMP_LB124]], align 4
29136 // CHECK14-NEXT:    store i32 [[TMP77]], i32* [[DOTOMP_IV129]], align 4
29137 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND131:%.*]]
29138 // CHECK14:       omp.inner.for.cond131:
29139 // CHECK14-NEXT:    [[TMP78:%.*]] = load i32, i32* [[DOTOMP_IV129]], align 4, !llvm.access.group !36
29140 // CHECK14-NEXT:    [[TMP79:%.*]] = load i32, i32* [[DOTOMP_UB125]], align 4, !llvm.access.group !36
29141 // CHECK14-NEXT:    [[CMP132:%.*]] = icmp sle i32 [[TMP78]], [[TMP79]]
29142 // CHECK14-NEXT:    br i1 [[CMP132]], label [[OMP_INNER_FOR_BODY133:%.*]], label [[OMP_INNER_FOR_END146:%.*]]
29143 // CHECK14:       omp.inner.for.body133:
29144 // CHECK14-NEXT:    [[TMP80:%.*]] = load i32, i32* [[DOTOMP_IV129]], align 4, !llvm.access.group !36
29145 // CHECK14-NEXT:    [[MUL134:%.*]] = mul nsw i32 [[TMP80]], 1
29146 // CHECK14-NEXT:    [[ADD135:%.*]] = add nsw i32 0, [[MUL134]]
29147 // CHECK14-NEXT:    store i32 [[ADD135]], i32* [[I130]], align 4, !llvm.access.group !36
29148 // CHECK14-NEXT:    [[TMP81:%.*]] = load i32*, i32** [[B]], align 8, !llvm.access.group !36
29149 // CHECK14-NEXT:    [[TMP82:%.*]] = load i32, i32* [[I130]], align 4, !llvm.access.group !36
29150 // CHECK14-NEXT:    [[IDXPROM136:%.*]] = sext i32 [[TMP82]] to i64
29151 // CHECK14-NEXT:    [[ARRAYIDX137:%.*]] = getelementptr inbounds i32, i32* [[TMP81]], i64 [[IDXPROM136]]
29152 // CHECK14-NEXT:    [[TMP83:%.*]] = load i32, i32* [[ARRAYIDX137]], align 4, !llvm.access.group !36
29153 // CHECK14-NEXT:    [[TMP84:%.*]] = load i32*, i32** [[C]], align 8, !llvm.access.group !36
29154 // CHECK14-NEXT:    [[TMP85:%.*]] = load i32, i32* [[I130]], align 4, !llvm.access.group !36
29155 // CHECK14-NEXT:    [[IDXPROM138:%.*]] = sext i32 [[TMP85]] to i64
29156 // CHECK14-NEXT:    [[ARRAYIDX139:%.*]] = getelementptr inbounds i32, i32* [[TMP84]], i64 [[IDXPROM138]]
29157 // CHECK14-NEXT:    [[TMP86:%.*]] = load i32, i32* [[ARRAYIDX139]], align 4, !llvm.access.group !36
29158 // CHECK14-NEXT:    [[ADD140:%.*]] = add nsw i32 [[TMP83]], [[TMP86]]
29159 // CHECK14-NEXT:    [[TMP87:%.*]] = load i32*, i32** [[A]], align 8, !llvm.access.group !36
29160 // CHECK14-NEXT:    [[TMP88:%.*]] = load i32, i32* [[I130]], align 4, !llvm.access.group !36
29161 // CHECK14-NEXT:    [[IDXPROM141:%.*]] = sext i32 [[TMP88]] to i64
29162 // CHECK14-NEXT:    [[ARRAYIDX142:%.*]] = getelementptr inbounds i32, i32* [[TMP87]], i64 [[IDXPROM141]]
29163 // CHECK14-NEXT:    store i32 [[ADD140]], i32* [[ARRAYIDX142]], align 4, !llvm.access.group !36
29164 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE143:%.*]]
29165 // CHECK14:       omp.body.continue143:
29166 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC144:%.*]]
29167 // CHECK14:       omp.inner.for.inc144:
29168 // CHECK14-NEXT:    [[TMP89:%.*]] = load i32, i32* [[DOTOMP_IV129]], align 4, !llvm.access.group !36
29169 // CHECK14-NEXT:    [[ADD145:%.*]] = add nsw i32 [[TMP89]], 1
29170 // CHECK14-NEXT:    store i32 [[ADD145]], i32* [[DOTOMP_IV129]], align 4, !llvm.access.group !36
29171 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND131]], !llvm.loop [[LOOP37:![0-9]+]]
29172 // CHECK14:       omp.inner.for.end146:
29173 // CHECK14-NEXT:    [[TMP90:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_119]], align 4
29174 // CHECK14-NEXT:    [[SUB147:%.*]] = sub nsw i32 [[TMP90]], 0
29175 // CHECK14-NEXT:    [[DIV148:%.*]] = sdiv i32 [[SUB147]], 1
29176 // CHECK14-NEXT:    [[MUL149:%.*]] = mul nsw i32 [[DIV148]], 1
29177 // CHECK14-NEXT:    [[ADD150:%.*]] = add nsw i32 0, [[MUL149]]
29178 // CHECK14-NEXT:    store i32 [[ADD150]], i32* [[I130]], align 4
29179 // CHECK14-NEXT:    br label [[SIMD_IF_END151]]
29180 // CHECK14:       simd.if.end151:
29181 // CHECK14-NEXT:    [[TMP91:%.*]] = load i32, i32* [[N]], align 4
29182 // CHECK14-NEXT:    store i32 [[TMP91]], i32* [[DOTCAPTURE_EXPR_153]], align 4
29183 // CHECK14-NEXT:    [[TMP92:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_153]], align 4
29184 // CHECK14-NEXT:    [[SUB155:%.*]] = sub nsw i32 [[TMP92]], 0
29185 // CHECK14-NEXT:    [[DIV156:%.*]] = sdiv i32 [[SUB155]], 1
29186 // CHECK14-NEXT:    [[SUB157:%.*]] = sub nsw i32 [[DIV156]], 1
29187 // CHECK14-NEXT:    store i32 [[SUB157]], i32* [[DOTCAPTURE_EXPR_154]], align 4
29188 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB158]], align 4
29189 // CHECK14-NEXT:    [[TMP93:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_154]], align 4
29190 // CHECK14-NEXT:    store i32 [[TMP93]], i32* [[DOTOMP_UB159]], align 4
29191 // CHECK14-NEXT:    store i32 0, i32* [[I160]], align 4
29192 // CHECK14-NEXT:    [[TMP94:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_153]], align 4
29193 // CHECK14-NEXT:    [[CMP161:%.*]] = icmp slt i32 0, [[TMP94]]
29194 // CHECK14-NEXT:    br i1 [[CMP161]], label [[SIMD_IF_THEN162:%.*]], label [[SIMD_IF_END185:%.*]]
29195 // CHECK14:       simd.if.then162:
29196 // CHECK14-NEXT:    [[TMP95:%.*]] = load i32, i32* [[DOTOMP_LB158]], align 4
29197 // CHECK14-NEXT:    store i32 [[TMP95]], i32* [[DOTOMP_IV163]], align 4
29198 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND165:%.*]]
29199 // CHECK14:       omp.inner.for.cond165:
29200 // CHECK14-NEXT:    [[TMP96:%.*]] = load i32, i32* [[DOTOMP_IV163]], align 4, !llvm.access.group !39
29201 // CHECK14-NEXT:    [[TMP97:%.*]] = load i32, i32* [[DOTOMP_UB159]], align 4, !llvm.access.group !39
29202 // CHECK14-NEXT:    [[CMP166:%.*]] = icmp sle i32 [[TMP96]], [[TMP97]]
29203 // CHECK14-NEXT:    br i1 [[CMP166]], label [[OMP_INNER_FOR_BODY167:%.*]], label [[OMP_INNER_FOR_END180:%.*]]
29204 // CHECK14:       omp.inner.for.body167:
29205 // CHECK14-NEXT:    [[TMP98:%.*]] = load i32, i32* [[DOTOMP_IV163]], align 4, !llvm.access.group !39
29206 // CHECK14-NEXT:    [[MUL168:%.*]] = mul nsw i32 [[TMP98]], 1
29207 // CHECK14-NEXT:    [[ADD169:%.*]] = add nsw i32 0, [[MUL168]]
29208 // CHECK14-NEXT:    store i32 [[ADD169]], i32* [[I164]], align 4, !llvm.access.group !39
29209 // CHECK14-NEXT:    [[TMP99:%.*]] = load i32*, i32** [[B]], align 8, !llvm.access.group !39
29210 // CHECK14-NEXT:    [[TMP100:%.*]] = load i32, i32* [[I164]], align 4, !llvm.access.group !39
29211 // CHECK14-NEXT:    [[IDXPROM170:%.*]] = sext i32 [[TMP100]] to i64
29212 // CHECK14-NEXT:    [[ARRAYIDX171:%.*]] = getelementptr inbounds i32, i32* [[TMP99]], i64 [[IDXPROM170]]
29213 // CHECK14-NEXT:    [[TMP101:%.*]] = load i32, i32* [[ARRAYIDX171]], align 4, !llvm.access.group !39
29214 // CHECK14-NEXT:    [[TMP102:%.*]] = load i32*, i32** [[C]], align 8, !llvm.access.group !39
29215 // CHECK14-NEXT:    [[TMP103:%.*]] = load i32, i32* [[I164]], align 4, !llvm.access.group !39
29216 // CHECK14-NEXT:    [[IDXPROM172:%.*]] = sext i32 [[TMP103]] to i64
29217 // CHECK14-NEXT:    [[ARRAYIDX173:%.*]] = getelementptr inbounds i32, i32* [[TMP102]], i64 [[IDXPROM172]]
29218 // CHECK14-NEXT:    [[TMP104:%.*]] = load i32, i32* [[ARRAYIDX173]], align 4, !llvm.access.group !39
29219 // CHECK14-NEXT:    [[ADD174:%.*]] = add nsw i32 [[TMP101]], [[TMP104]]
29220 // CHECK14-NEXT:    [[TMP105:%.*]] = load i32*, i32** [[A]], align 8, !llvm.access.group !39
29221 // CHECK14-NEXT:    [[TMP106:%.*]] = load i32, i32* [[I164]], align 4, !llvm.access.group !39
29222 // CHECK14-NEXT:    [[IDXPROM175:%.*]] = sext i32 [[TMP106]] to i64
29223 // CHECK14-NEXT:    [[ARRAYIDX176:%.*]] = getelementptr inbounds i32, i32* [[TMP105]], i64 [[IDXPROM175]]
29224 // CHECK14-NEXT:    store i32 [[ADD174]], i32* [[ARRAYIDX176]], align 4, !llvm.access.group !39
29225 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE177:%.*]]
29226 // CHECK14:       omp.body.continue177:
29227 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC178:%.*]]
29228 // CHECK14:       omp.inner.for.inc178:
29229 // CHECK14-NEXT:    [[TMP107:%.*]] = load i32, i32* [[DOTOMP_IV163]], align 4, !llvm.access.group !39
29230 // CHECK14-NEXT:    [[ADD179:%.*]] = add nsw i32 [[TMP107]], 1
29231 // CHECK14-NEXT:    store i32 [[ADD179]], i32* [[DOTOMP_IV163]], align 4, !llvm.access.group !39
29232 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND165]], !llvm.loop [[LOOP40:![0-9]+]]
29233 // CHECK14:       omp.inner.for.end180:
29234 // CHECK14-NEXT:    [[TMP108:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_153]], align 4
29235 // CHECK14-NEXT:    [[SUB181:%.*]] = sub nsw i32 [[TMP108]], 0
29236 // CHECK14-NEXT:    [[DIV182:%.*]] = sdiv i32 [[SUB181]], 1
29237 // CHECK14-NEXT:    [[MUL183:%.*]] = mul nsw i32 [[DIV182]], 1
29238 // CHECK14-NEXT:    [[ADD184:%.*]] = add nsw i32 0, [[MUL183]]
29239 // CHECK14-NEXT:    store i32 [[ADD184]], i32* [[I164]], align 4
29240 // CHECK14-NEXT:    br label [[SIMD_IF_END185]]
29241 // CHECK14:       simd.if.end185:
29242 // CHECK14-NEXT:    [[TMP109:%.*]] = load i32, i32* [[CH]], align 4
29243 // CHECK14-NEXT:    store i32 [[TMP109]], i32* [[DOTCAPTURE_EXPR_186]], align 4
29244 // CHECK14-NEXT:    [[TMP110:%.*]] = load i32, i32* [[N]], align 4
29245 // CHECK14-NEXT:    store i32 [[TMP110]], i32* [[DOTCAPTURE_EXPR_188]], align 4
29246 // CHECK14-NEXT:    [[TMP111:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_188]], align 4
29247 // CHECK14-NEXT:    [[SUB190:%.*]] = sub nsw i32 [[TMP111]], 0
29248 // CHECK14-NEXT:    [[DIV191:%.*]] = sdiv i32 [[SUB190]], 1
29249 // CHECK14-NEXT:    [[SUB192:%.*]] = sub nsw i32 [[DIV191]], 1
29250 // CHECK14-NEXT:    store i32 [[SUB192]], i32* [[DOTCAPTURE_EXPR_189]], align 4
29251 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB193]], align 4
29252 // CHECK14-NEXT:    [[TMP112:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_189]], align 4
29253 // CHECK14-NEXT:    store i32 [[TMP112]], i32* [[DOTOMP_UB194]], align 4
29254 // CHECK14-NEXT:    store i32 0, i32* [[I195]], align 4
29255 // CHECK14-NEXT:    [[TMP113:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_188]], align 4
29256 // CHECK14-NEXT:    [[CMP196:%.*]] = icmp slt i32 0, [[TMP113]]
29257 // CHECK14-NEXT:    br i1 [[CMP196]], label [[SIMD_IF_THEN197:%.*]], label [[SIMD_IF_END220:%.*]]
29258 // CHECK14:       simd.if.then197:
29259 // CHECK14-NEXT:    [[TMP114:%.*]] = load i32, i32* [[DOTOMP_LB193]], align 4
29260 // CHECK14-NEXT:    store i32 [[TMP114]], i32* [[DOTOMP_IV198]], align 4
29261 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND200:%.*]]
29262 // CHECK14:       omp.inner.for.cond200:
29263 // CHECK14-NEXT:    [[TMP115:%.*]] = load i32, i32* [[DOTOMP_IV198]], align 4, !llvm.access.group !42
29264 // CHECK14-NEXT:    [[TMP116:%.*]] = load i32, i32* [[DOTOMP_UB194]], align 4, !llvm.access.group !42
29265 // CHECK14-NEXT:    [[CMP201:%.*]] = icmp sle i32 [[TMP115]], [[TMP116]]
29266 // CHECK14-NEXT:    br i1 [[CMP201]], label [[OMP_INNER_FOR_BODY202:%.*]], label [[OMP_INNER_FOR_END215:%.*]]
29267 // CHECK14:       omp.inner.for.body202:
29268 // CHECK14-NEXT:    [[TMP117:%.*]] = load i32, i32* [[DOTOMP_IV198]], align 4, !llvm.access.group !42
29269 // CHECK14-NEXT:    [[MUL203:%.*]] = mul nsw i32 [[TMP117]], 1
29270 // CHECK14-NEXT:    [[ADD204:%.*]] = add nsw i32 0, [[MUL203]]
29271 // CHECK14-NEXT:    store i32 [[ADD204]], i32* [[I199]], align 4, !llvm.access.group !42
29272 // CHECK14-NEXT:    [[TMP118:%.*]] = load i32*, i32** [[B]], align 8, !llvm.access.group !42
29273 // CHECK14-NEXT:    [[TMP119:%.*]] = load i32, i32* [[I199]], align 4, !llvm.access.group !42
29274 // CHECK14-NEXT:    [[IDXPROM205:%.*]] = sext i32 [[TMP119]] to i64
29275 // CHECK14-NEXT:    [[ARRAYIDX206:%.*]] = getelementptr inbounds i32, i32* [[TMP118]], i64 [[IDXPROM205]]
29276 // CHECK14-NEXT:    [[TMP120:%.*]] = load i32, i32* [[ARRAYIDX206]], align 4, !llvm.access.group !42
29277 // CHECK14-NEXT:    [[TMP121:%.*]] = load i32*, i32** [[C]], align 8, !llvm.access.group !42
29278 // CHECK14-NEXT:    [[TMP122:%.*]] = load i32, i32* [[I199]], align 4, !llvm.access.group !42
29279 // CHECK14-NEXT:    [[IDXPROM207:%.*]] = sext i32 [[TMP122]] to i64
29280 // CHECK14-NEXT:    [[ARRAYIDX208:%.*]] = getelementptr inbounds i32, i32* [[TMP121]], i64 [[IDXPROM207]]
29281 // CHECK14-NEXT:    [[TMP123:%.*]] = load i32, i32* [[ARRAYIDX208]], align 4, !llvm.access.group !42
29282 // CHECK14-NEXT:    [[ADD209:%.*]] = add nsw i32 [[TMP120]], [[TMP123]]
29283 // CHECK14-NEXT:    [[TMP124:%.*]] = load i32*, i32** [[A]], align 8, !llvm.access.group !42
29284 // CHECK14-NEXT:    [[TMP125:%.*]] = load i32, i32* [[I199]], align 4, !llvm.access.group !42
29285 // CHECK14-NEXT:    [[IDXPROM210:%.*]] = sext i32 [[TMP125]] to i64
29286 // CHECK14-NEXT:    [[ARRAYIDX211:%.*]] = getelementptr inbounds i32, i32* [[TMP124]], i64 [[IDXPROM210]]
29287 // CHECK14-NEXT:    store i32 [[ADD209]], i32* [[ARRAYIDX211]], align 4, !llvm.access.group !42
29288 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE212:%.*]]
29289 // CHECK14:       omp.body.continue212:
29290 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC213:%.*]]
29291 // CHECK14:       omp.inner.for.inc213:
29292 // CHECK14-NEXT:    [[TMP126:%.*]] = load i32, i32* [[DOTOMP_IV198]], align 4, !llvm.access.group !42
29293 // CHECK14-NEXT:    [[ADD214:%.*]] = add nsw i32 [[TMP126]], 1
29294 // CHECK14-NEXT:    store i32 [[ADD214]], i32* [[DOTOMP_IV198]], align 4, !llvm.access.group !42
29295 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND200]], !llvm.loop [[LOOP43:![0-9]+]]
29296 // CHECK14:       omp.inner.for.end215:
29297 // CHECK14-NEXT:    [[TMP127:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_188]], align 4
29298 // CHECK14-NEXT:    [[SUB216:%.*]] = sub nsw i32 [[TMP127]], 0
29299 // CHECK14-NEXT:    [[DIV217:%.*]] = sdiv i32 [[SUB216]], 1
29300 // CHECK14-NEXT:    [[MUL218:%.*]] = mul nsw i32 [[DIV217]], 1
29301 // CHECK14-NEXT:    [[ADD219:%.*]] = add nsw i32 0, [[MUL218]]
29302 // CHECK14-NEXT:    store i32 [[ADD219]], i32* [[I199]], align 4
29303 // CHECK14-NEXT:    br label [[SIMD_IF_END220]]
29304 // CHECK14:       simd.if.end220:
29305 // CHECK14-NEXT:    ret i32 0
29306 //
29307 //
29308 // CHECK15-LABEL: define {{[^@]+}}@main
29309 // CHECK15-SAME: () #[[ATTR0:[0-9]+]] {
29310 // CHECK15-NEXT:  entry:
29311 // CHECK15-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
29312 // CHECK15-NEXT:    [[A:%.*]] = alloca double*, align 4
29313 // CHECK15-NEXT:    [[B:%.*]] = alloca double*, align 4
29314 // CHECK15-NEXT:    [[C:%.*]] = alloca double*, align 4
29315 // CHECK15-NEXT:    [[N:%.*]] = alloca i32, align 4
29316 // CHECK15-NEXT:    [[CH:%.*]] = alloca i32, align 4
29317 // CHECK15-NEXT:    [[TMP:%.*]] = alloca i32, align 4
29318 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
29319 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
29320 // CHECK15-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
29321 // CHECK15-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
29322 // CHECK15-NEXT:    [[I:%.*]] = alloca i32, align 4
29323 // CHECK15-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
29324 // CHECK15-NEXT:    [[I3:%.*]] = alloca i32, align 4
29325 // CHECK15-NEXT:    [[_TMP13:%.*]] = alloca i32, align 4
29326 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_14:%.*]] = alloca i32, align 4
29327 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_15:%.*]] = alloca i32, align 4
29328 // CHECK15-NEXT:    [[DOTOMP_LB19:%.*]] = alloca i32, align 4
29329 // CHECK15-NEXT:    [[DOTOMP_UB20:%.*]] = alloca i32, align 4
29330 // CHECK15-NEXT:    [[I21:%.*]] = alloca i32, align 4
29331 // CHECK15-NEXT:    [[DOTOMP_IV24:%.*]] = alloca i32, align 4
29332 // CHECK15-NEXT:    [[I25:%.*]] = alloca i32, align 4
29333 // CHECK15-NEXT:    [[_TMP44:%.*]] = alloca i32, align 4
29334 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_45:%.*]] = alloca i32, align 4
29335 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_46:%.*]] = alloca i32, align 4
29336 // CHECK15-NEXT:    [[DOTOMP_LB50:%.*]] = alloca i32, align 4
29337 // CHECK15-NEXT:    [[DOTOMP_UB51:%.*]] = alloca i32, align 4
29338 // CHECK15-NEXT:    [[I52:%.*]] = alloca i32, align 4
29339 // CHECK15-NEXT:    [[DOTOMP_IV55:%.*]] = alloca i32, align 4
29340 // CHECK15-NEXT:    [[I56:%.*]] = alloca i32, align 4
29341 // CHECK15-NEXT:    [[_TMP75:%.*]] = alloca i32, align 4
29342 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_76:%.*]] = alloca i32, align 4
29343 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_77:%.*]] = alloca i32, align 4
29344 // CHECK15-NEXT:    [[DOTOMP_LB81:%.*]] = alloca i32, align 4
29345 // CHECK15-NEXT:    [[DOTOMP_UB82:%.*]] = alloca i32, align 4
29346 // CHECK15-NEXT:    [[I83:%.*]] = alloca i32, align 4
29347 // CHECK15-NEXT:    [[DOTOMP_IV86:%.*]] = alloca i32, align 4
29348 // CHECK15-NEXT:    [[I87:%.*]] = alloca i32, align 4
29349 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_106:%.*]] = alloca i32, align 4
29350 // CHECK15-NEXT:    [[_TMP107:%.*]] = alloca i32, align 4
29351 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_108:%.*]] = alloca i32, align 4
29352 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_109:%.*]] = alloca i32, align 4
29353 // CHECK15-NEXT:    [[DOTOMP_LB113:%.*]] = alloca i32, align 4
29354 // CHECK15-NEXT:    [[DOTOMP_UB114:%.*]] = alloca i32, align 4
29355 // CHECK15-NEXT:    [[I115:%.*]] = alloca i32, align 4
29356 // CHECK15-NEXT:    [[DOTOMP_IV118:%.*]] = alloca i32, align 4
29357 // CHECK15-NEXT:    [[I119:%.*]] = alloca i32, align 4
29358 // CHECK15-NEXT:    [[_TMP138:%.*]] = alloca i32, align 4
29359 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_139:%.*]] = alloca i32, align 4
29360 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_140:%.*]] = alloca i32, align 4
29361 // CHECK15-NEXT:    [[DOTOMP_LB144:%.*]] = alloca i32, align 4
29362 // CHECK15-NEXT:    [[DOTOMP_UB145:%.*]] = alloca i32, align 4
29363 // CHECK15-NEXT:    [[I146:%.*]] = alloca i32, align 4
29364 // CHECK15-NEXT:    [[DOTOMP_IV149:%.*]] = alloca i32, align 4
29365 // CHECK15-NEXT:    [[I150:%.*]] = alloca i32, align 4
29366 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_169:%.*]] = alloca i32, align 4
29367 // CHECK15-NEXT:    [[_TMP170:%.*]] = alloca i32, align 4
29368 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_171:%.*]] = alloca i32, align 4
29369 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_172:%.*]] = alloca i32, align 4
29370 // CHECK15-NEXT:    [[DOTOMP_LB176:%.*]] = alloca i32, align 4
29371 // CHECK15-NEXT:    [[DOTOMP_UB177:%.*]] = alloca i32, align 4
29372 // CHECK15-NEXT:    [[I178:%.*]] = alloca i32, align 4
29373 // CHECK15-NEXT:    [[DOTOMP_IV181:%.*]] = alloca i32, align 4
29374 // CHECK15-NEXT:    [[I182:%.*]] = alloca i32, align 4
29375 // CHECK15-NEXT:    store i32 0, i32* [[RETVAL]], align 4
29376 // CHECK15-NEXT:    store i32 10000, i32* [[N]], align 4
29377 // CHECK15-NEXT:    store i32 100, i32* [[CH]], align 4
29378 // CHECK15-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
29379 // CHECK15-NEXT:    store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
29380 // CHECK15-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
29381 // CHECK15-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP1]], 0
29382 // CHECK15-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
29383 // CHECK15-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
29384 // CHECK15-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
29385 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
29386 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
29387 // CHECK15-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
29388 // CHECK15-NEXT:    store i32 0, i32* [[I]], align 4
29389 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
29390 // CHECK15-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
29391 // CHECK15-NEXT:    br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
29392 // CHECK15:       simd.if.then:
29393 // CHECK15-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
29394 // CHECK15-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
29395 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
29396 // CHECK15:       omp.inner.for.cond:
29397 // CHECK15-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
29398 // CHECK15-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !3
29399 // CHECK15-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
29400 // CHECK15-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
29401 // CHECK15:       omp.inner.for.body:
29402 // CHECK15-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
29403 // CHECK15-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
29404 // CHECK15-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
29405 // CHECK15-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !3
29406 // CHECK15-NEXT:    [[TMP8:%.*]] = load double*, double** [[B]], align 4, !llvm.access.group !3
29407 // CHECK15-NEXT:    [[TMP9:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !3
29408 // CHECK15-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP8]], i32 [[TMP9]]
29409 // CHECK15-NEXT:    [[TMP10:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !3
29410 // CHECK15-NEXT:    [[TMP11:%.*]] = load double*, double** [[C]], align 4, !llvm.access.group !3
29411 // CHECK15-NEXT:    [[TMP12:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !3
29412 // CHECK15-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds double, double* [[TMP11]], i32 [[TMP12]]
29413 // CHECK15-NEXT:    [[TMP13:%.*]] = load double, double* [[ARRAYIDX5]], align 4, !llvm.access.group !3
29414 // CHECK15-NEXT:    [[ADD6:%.*]] = fadd double [[TMP10]], [[TMP13]]
29415 // CHECK15-NEXT:    [[TMP14:%.*]] = load double*, double** [[A]], align 4, !llvm.access.group !3
29416 // CHECK15-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !3
29417 // CHECK15-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds double, double* [[TMP14]], i32 [[TMP15]]
29418 // CHECK15-NEXT:    store double [[ADD6]], double* [[ARRAYIDX7]], align 4, !llvm.access.group !3
29419 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
29420 // CHECK15:       omp.body.continue:
29421 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
29422 // CHECK15:       omp.inner.for.inc:
29423 // CHECK15-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
29424 // CHECK15-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP16]], 1
29425 // CHECK15-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
29426 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
29427 // CHECK15:       omp.inner.for.end:
29428 // CHECK15-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
29429 // CHECK15-NEXT:    [[SUB9:%.*]] = sub nsw i32 [[TMP17]], 0
29430 // CHECK15-NEXT:    [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
29431 // CHECK15-NEXT:    [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
29432 // CHECK15-NEXT:    [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
29433 // CHECK15-NEXT:    store i32 [[ADD12]], i32* [[I3]], align 4
29434 // CHECK15-NEXT:    br label [[SIMD_IF_END]]
29435 // CHECK15:       simd.if.end:
29436 // CHECK15-NEXT:    [[TMP18:%.*]] = load i32, i32* [[N]], align 4
29437 // CHECK15-NEXT:    store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR_14]], align 4
29438 // CHECK15-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_14]], align 4
29439 // CHECK15-NEXT:    [[SUB16:%.*]] = sub nsw i32 [[TMP19]], 0
29440 // CHECK15-NEXT:    [[DIV17:%.*]] = sdiv i32 [[SUB16]], 1
29441 // CHECK15-NEXT:    [[SUB18:%.*]] = sub nsw i32 [[DIV17]], 1
29442 // CHECK15-NEXT:    store i32 [[SUB18]], i32* [[DOTCAPTURE_EXPR_15]], align 4
29443 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB19]], align 4
29444 // CHECK15-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_15]], align 4
29445 // CHECK15-NEXT:    store i32 [[TMP20]], i32* [[DOTOMP_UB20]], align 4
29446 // CHECK15-NEXT:    store i32 0, i32* [[I21]], align 4
29447 // CHECK15-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_14]], align 4
29448 // CHECK15-NEXT:    [[CMP22:%.*]] = icmp slt i32 0, [[TMP21]]
29449 // CHECK15-NEXT:    br i1 [[CMP22]], label [[SIMD_IF_THEN23:%.*]], label [[SIMD_IF_END43:%.*]]
29450 // CHECK15:       simd.if.then23:
29451 // CHECK15-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_LB19]], align 4
29452 // CHECK15-NEXT:    store i32 [[TMP22]], i32* [[DOTOMP_IV24]], align 4
29453 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND26:%.*]]
29454 // CHECK15:       omp.inner.for.cond26:
29455 // CHECK15-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV24]], align 4, !llvm.access.group !7
29456 // CHECK15-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_UB20]], align 4, !llvm.access.group !7
29457 // CHECK15-NEXT:    [[CMP27:%.*]] = icmp sle i32 [[TMP23]], [[TMP24]]
29458 // CHECK15-NEXT:    br i1 [[CMP27]], label [[OMP_INNER_FOR_BODY28:%.*]], label [[OMP_INNER_FOR_END38:%.*]]
29459 // CHECK15:       omp.inner.for.body28:
29460 // CHECK15-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV24]], align 4, !llvm.access.group !7
29461 // CHECK15-NEXT:    [[MUL29:%.*]] = mul nsw i32 [[TMP25]], 1
29462 // CHECK15-NEXT:    [[ADD30:%.*]] = add nsw i32 0, [[MUL29]]
29463 // CHECK15-NEXT:    store i32 [[ADD30]], i32* [[I25]], align 4, !llvm.access.group !7
29464 // CHECK15-NEXT:    [[TMP26:%.*]] = load double*, double** [[B]], align 4, !llvm.access.group !7
29465 // CHECK15-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I25]], align 4, !llvm.access.group !7
29466 // CHECK15-NEXT:    [[ARRAYIDX31:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
29467 // CHECK15-NEXT:    [[TMP28:%.*]] = load double, double* [[ARRAYIDX31]], align 4, !llvm.access.group !7
29468 // CHECK15-NEXT:    [[TMP29:%.*]] = load double*, double** [[C]], align 4, !llvm.access.group !7
29469 // CHECK15-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I25]], align 4, !llvm.access.group !7
29470 // CHECK15-NEXT:    [[ARRAYIDX32:%.*]] = getelementptr inbounds double, double* [[TMP29]], i32 [[TMP30]]
29471 // CHECK15-NEXT:    [[TMP31:%.*]] = load double, double* [[ARRAYIDX32]], align 4, !llvm.access.group !7
29472 // CHECK15-NEXT:    [[ADD33:%.*]] = fadd double [[TMP28]], [[TMP31]]
29473 // CHECK15-NEXT:    [[TMP32:%.*]] = load double*, double** [[A]], align 4, !llvm.access.group !7
29474 // CHECK15-NEXT:    [[TMP33:%.*]] = load i32, i32* [[I25]], align 4, !llvm.access.group !7
29475 // CHECK15-NEXT:    [[ARRAYIDX34:%.*]] = getelementptr inbounds double, double* [[TMP32]], i32 [[TMP33]]
29476 // CHECK15-NEXT:    store double [[ADD33]], double* [[ARRAYIDX34]], align 4, !llvm.access.group !7
29477 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE35:%.*]]
29478 // CHECK15:       omp.body.continue35:
29479 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC36:%.*]]
29480 // CHECK15:       omp.inner.for.inc36:
29481 // CHECK15-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV24]], align 4, !llvm.access.group !7
29482 // CHECK15-NEXT:    [[ADD37:%.*]] = add nsw i32 [[TMP34]], 1
29483 // CHECK15-NEXT:    store i32 [[ADD37]], i32* [[DOTOMP_IV24]], align 4, !llvm.access.group !7
29484 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND26]], !llvm.loop [[LOOP8:![0-9]+]]
29485 // CHECK15:       omp.inner.for.end38:
29486 // CHECK15-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_14]], align 4
29487 // CHECK15-NEXT:    [[SUB39:%.*]] = sub nsw i32 [[TMP35]], 0
29488 // CHECK15-NEXT:    [[DIV40:%.*]] = sdiv i32 [[SUB39]], 1
29489 // CHECK15-NEXT:    [[MUL41:%.*]] = mul nsw i32 [[DIV40]], 1
29490 // CHECK15-NEXT:    [[ADD42:%.*]] = add nsw i32 0, [[MUL41]]
29491 // CHECK15-NEXT:    store i32 [[ADD42]], i32* [[I25]], align 4
29492 // CHECK15-NEXT:    br label [[SIMD_IF_END43]]
29493 // CHECK15:       simd.if.end43:
29494 // CHECK15-NEXT:    [[TMP36:%.*]] = load i32, i32* [[N]], align 4
29495 // CHECK15-NEXT:    store i32 [[TMP36]], i32* [[DOTCAPTURE_EXPR_45]], align 4
29496 // CHECK15-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_45]], align 4
29497 // CHECK15-NEXT:    [[SUB47:%.*]] = sub nsw i32 [[TMP37]], 0
29498 // CHECK15-NEXT:    [[DIV48:%.*]] = sdiv i32 [[SUB47]], 1
29499 // CHECK15-NEXT:    [[SUB49:%.*]] = sub nsw i32 [[DIV48]], 1
29500 // CHECK15-NEXT:    store i32 [[SUB49]], i32* [[DOTCAPTURE_EXPR_46]], align 4
29501 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB50]], align 4
29502 // CHECK15-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_46]], align 4
29503 // CHECK15-NEXT:    store i32 [[TMP38]], i32* [[DOTOMP_UB51]], align 4
29504 // CHECK15-NEXT:    store i32 0, i32* [[I52]], align 4
29505 // CHECK15-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_45]], align 4
29506 // CHECK15-NEXT:    [[CMP53:%.*]] = icmp slt i32 0, [[TMP39]]
29507 // CHECK15-NEXT:    br i1 [[CMP53]], label [[SIMD_IF_THEN54:%.*]], label [[SIMD_IF_END74:%.*]]
29508 // CHECK15:       simd.if.then54:
29509 // CHECK15-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_LB50]], align 4
29510 // CHECK15-NEXT:    store i32 [[TMP40]], i32* [[DOTOMP_IV55]], align 4
29511 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND57:%.*]]
29512 // CHECK15:       omp.inner.for.cond57:
29513 // CHECK15-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_IV55]], align 4, !llvm.access.group !10
29514 // CHECK15-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_UB51]], align 4, !llvm.access.group !10
29515 // CHECK15-NEXT:    [[CMP58:%.*]] = icmp sle i32 [[TMP41]], [[TMP42]]
29516 // CHECK15-NEXT:    br i1 [[CMP58]], label [[OMP_INNER_FOR_BODY59:%.*]], label [[OMP_INNER_FOR_END69:%.*]]
29517 // CHECK15:       omp.inner.for.body59:
29518 // CHECK15-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV55]], align 4, !llvm.access.group !10
29519 // CHECK15-NEXT:    [[MUL60:%.*]] = mul nsw i32 [[TMP43]], 1
29520 // CHECK15-NEXT:    [[ADD61:%.*]] = add nsw i32 0, [[MUL60]]
29521 // CHECK15-NEXT:    store i32 [[ADD61]], i32* [[I56]], align 4, !llvm.access.group !10
29522 // CHECK15-NEXT:    [[TMP44:%.*]] = load double*, double** [[B]], align 4, !llvm.access.group !10
29523 // CHECK15-NEXT:    [[TMP45:%.*]] = load i32, i32* [[I56]], align 4, !llvm.access.group !10
29524 // CHECK15-NEXT:    [[ARRAYIDX62:%.*]] = getelementptr inbounds double, double* [[TMP44]], i32 [[TMP45]]
29525 // CHECK15-NEXT:    [[TMP46:%.*]] = load double, double* [[ARRAYIDX62]], align 4, !llvm.access.group !10
29526 // CHECK15-NEXT:    [[TMP47:%.*]] = load double*, double** [[C]], align 4, !llvm.access.group !10
29527 // CHECK15-NEXT:    [[TMP48:%.*]] = load i32, i32* [[I56]], align 4, !llvm.access.group !10
29528 // CHECK15-NEXT:    [[ARRAYIDX63:%.*]] = getelementptr inbounds double, double* [[TMP47]], i32 [[TMP48]]
29529 // CHECK15-NEXT:    [[TMP49:%.*]] = load double, double* [[ARRAYIDX63]], align 4, !llvm.access.group !10
29530 // CHECK15-NEXT:    [[ADD64:%.*]] = fadd double [[TMP46]], [[TMP49]]
29531 // CHECK15-NEXT:    [[TMP50:%.*]] = load double*, double** [[A]], align 4, !llvm.access.group !10
29532 // CHECK15-NEXT:    [[TMP51:%.*]] = load i32, i32* [[I56]], align 4, !llvm.access.group !10
29533 // CHECK15-NEXT:    [[ARRAYIDX65:%.*]] = getelementptr inbounds double, double* [[TMP50]], i32 [[TMP51]]
29534 // CHECK15-NEXT:    store double [[ADD64]], double* [[ARRAYIDX65]], align 4, !llvm.access.group !10
29535 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE66:%.*]]
29536 // CHECK15:       omp.body.continue66:
29537 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC67:%.*]]
29538 // CHECK15:       omp.inner.for.inc67:
29539 // CHECK15-NEXT:    [[TMP52:%.*]] = load i32, i32* [[DOTOMP_IV55]], align 4, !llvm.access.group !10
29540 // CHECK15-NEXT:    [[ADD68:%.*]] = add nsw i32 [[TMP52]], 1
29541 // CHECK15-NEXT:    store i32 [[ADD68]], i32* [[DOTOMP_IV55]], align 4, !llvm.access.group !10
29542 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND57]], !llvm.loop [[LOOP11:![0-9]+]]
29543 // CHECK15:       omp.inner.for.end69:
29544 // CHECK15-NEXT:    [[TMP53:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_45]], align 4
29545 // CHECK15-NEXT:    [[SUB70:%.*]] = sub nsw i32 [[TMP53]], 0
29546 // CHECK15-NEXT:    [[DIV71:%.*]] = sdiv i32 [[SUB70]], 1
29547 // CHECK15-NEXT:    [[MUL72:%.*]] = mul nsw i32 [[DIV71]], 1
29548 // CHECK15-NEXT:    [[ADD73:%.*]] = add nsw i32 0, [[MUL72]]
29549 // CHECK15-NEXT:    store i32 [[ADD73]], i32* [[I56]], align 4
29550 // CHECK15-NEXT:    br label [[SIMD_IF_END74]]
29551 // CHECK15:       simd.if.end74:
29552 // CHECK15-NEXT:    [[TMP54:%.*]] = load i32, i32* [[N]], align 4
29553 // CHECK15-NEXT:    store i32 [[TMP54]], i32* [[DOTCAPTURE_EXPR_76]], align 4
29554 // CHECK15-NEXT:    [[TMP55:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_76]], align 4
29555 // CHECK15-NEXT:    [[SUB78:%.*]] = sub nsw i32 [[TMP55]], 0
29556 // CHECK15-NEXT:    [[DIV79:%.*]] = sdiv i32 [[SUB78]], 1
29557 // CHECK15-NEXT:    [[SUB80:%.*]] = sub nsw i32 [[DIV79]], 1
29558 // CHECK15-NEXT:    store i32 [[SUB80]], i32* [[DOTCAPTURE_EXPR_77]], align 4
29559 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB81]], align 4
29560 // CHECK15-NEXT:    [[TMP56:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_77]], align 4
29561 // CHECK15-NEXT:    store i32 [[TMP56]], i32* [[DOTOMP_UB82]], align 4
29562 // CHECK15-NEXT:    store i32 0, i32* [[I83]], align 4
29563 // CHECK15-NEXT:    [[TMP57:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_76]], align 4
29564 // CHECK15-NEXT:    [[CMP84:%.*]] = icmp slt i32 0, [[TMP57]]
29565 // CHECK15-NEXT:    br i1 [[CMP84]], label [[SIMD_IF_THEN85:%.*]], label [[SIMD_IF_END105:%.*]]
29566 // CHECK15:       simd.if.then85:
29567 // CHECK15-NEXT:    [[TMP58:%.*]] = load i32, i32* [[DOTOMP_LB81]], align 4
29568 // CHECK15-NEXT:    store i32 [[TMP58]], i32* [[DOTOMP_IV86]], align 4
29569 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND88:%.*]]
29570 // CHECK15:       omp.inner.for.cond88:
29571 // CHECK15-NEXT:    [[TMP59:%.*]] = load i32, i32* [[DOTOMP_IV86]], align 4, !llvm.access.group !13
29572 // CHECK15-NEXT:    [[TMP60:%.*]] = load i32, i32* [[DOTOMP_UB82]], align 4, !llvm.access.group !13
29573 // CHECK15-NEXT:    [[CMP89:%.*]] = icmp sle i32 [[TMP59]], [[TMP60]]
29574 // CHECK15-NEXT:    br i1 [[CMP89]], label [[OMP_INNER_FOR_BODY90:%.*]], label [[OMP_INNER_FOR_END100:%.*]]
29575 // CHECK15:       omp.inner.for.body90:
29576 // CHECK15-NEXT:    [[TMP61:%.*]] = load i32, i32* [[DOTOMP_IV86]], align 4, !llvm.access.group !13
29577 // CHECK15-NEXT:    [[MUL91:%.*]] = mul nsw i32 [[TMP61]], 1
29578 // CHECK15-NEXT:    [[ADD92:%.*]] = add nsw i32 0, [[MUL91]]
29579 // CHECK15-NEXT:    store i32 [[ADD92]], i32* [[I87]], align 4, !llvm.access.group !13
29580 // CHECK15-NEXT:    [[TMP62:%.*]] = load double*, double** [[B]], align 4, !llvm.access.group !13
29581 // CHECK15-NEXT:    [[TMP63:%.*]] = load i32, i32* [[I87]], align 4, !llvm.access.group !13
29582 // CHECK15-NEXT:    [[ARRAYIDX93:%.*]] = getelementptr inbounds double, double* [[TMP62]], i32 [[TMP63]]
29583 // CHECK15-NEXT:    [[TMP64:%.*]] = load double, double* [[ARRAYIDX93]], align 4, !llvm.access.group !13
29584 // CHECK15-NEXT:    [[TMP65:%.*]] = load double*, double** [[C]], align 4, !llvm.access.group !13
29585 // CHECK15-NEXT:    [[TMP66:%.*]] = load i32, i32* [[I87]], align 4, !llvm.access.group !13
29586 // CHECK15-NEXT:    [[ARRAYIDX94:%.*]] = getelementptr inbounds double, double* [[TMP65]], i32 [[TMP66]]
29587 // CHECK15-NEXT:    [[TMP67:%.*]] = load double, double* [[ARRAYIDX94]], align 4, !llvm.access.group !13
29588 // CHECK15-NEXT:    [[ADD95:%.*]] = fadd double [[TMP64]], [[TMP67]]
29589 // CHECK15-NEXT:    [[TMP68:%.*]] = load double*, double** [[A]], align 4, !llvm.access.group !13
29590 // CHECK15-NEXT:    [[TMP69:%.*]] = load i32, i32* [[I87]], align 4, !llvm.access.group !13
29591 // CHECK15-NEXT:    [[ARRAYIDX96:%.*]] = getelementptr inbounds double, double* [[TMP68]], i32 [[TMP69]]
29592 // CHECK15-NEXT:    store double [[ADD95]], double* [[ARRAYIDX96]], align 4, !llvm.access.group !13
29593 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE97:%.*]]
29594 // CHECK15:       omp.body.continue97:
29595 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC98:%.*]]
29596 // CHECK15:       omp.inner.for.inc98:
29597 // CHECK15-NEXT:    [[TMP70:%.*]] = load i32, i32* [[DOTOMP_IV86]], align 4, !llvm.access.group !13
29598 // CHECK15-NEXT:    [[ADD99:%.*]] = add nsw i32 [[TMP70]], 1
29599 // CHECK15-NEXT:    store i32 [[ADD99]], i32* [[DOTOMP_IV86]], align 4, !llvm.access.group !13
29600 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND88]], !llvm.loop [[LOOP14:![0-9]+]]
29601 // CHECK15:       omp.inner.for.end100:
29602 // CHECK15-NEXT:    [[TMP71:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_76]], align 4
29603 // CHECK15-NEXT:    [[SUB101:%.*]] = sub nsw i32 [[TMP71]], 0
29604 // CHECK15-NEXT:    [[DIV102:%.*]] = sdiv i32 [[SUB101]], 1
29605 // CHECK15-NEXT:    [[MUL103:%.*]] = mul nsw i32 [[DIV102]], 1
29606 // CHECK15-NEXT:    [[ADD104:%.*]] = add nsw i32 0, [[MUL103]]
29607 // CHECK15-NEXT:    store i32 [[ADD104]], i32* [[I87]], align 4
29608 // CHECK15-NEXT:    br label [[SIMD_IF_END105]]
29609 // CHECK15:       simd.if.end105:
29610 // CHECK15-NEXT:    [[TMP72:%.*]] = load i32, i32* [[CH]], align 4
29611 // CHECK15-NEXT:    store i32 [[TMP72]], i32* [[DOTCAPTURE_EXPR_106]], align 4
29612 // CHECK15-NEXT:    [[TMP73:%.*]] = load i32, i32* [[N]], align 4
29613 // CHECK15-NEXT:    store i32 [[TMP73]], i32* [[DOTCAPTURE_EXPR_108]], align 4
29614 // CHECK15-NEXT:    [[TMP74:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_108]], align 4
29615 // CHECK15-NEXT:    [[SUB110:%.*]] = sub nsw i32 [[TMP74]], 0
29616 // CHECK15-NEXT:    [[DIV111:%.*]] = sdiv i32 [[SUB110]], 1
29617 // CHECK15-NEXT:    [[SUB112:%.*]] = sub nsw i32 [[DIV111]], 1
29618 // CHECK15-NEXT:    store i32 [[SUB112]], i32* [[DOTCAPTURE_EXPR_109]], align 4
29619 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB113]], align 4
29620 // CHECK15-NEXT:    [[TMP75:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_109]], align 4
29621 // CHECK15-NEXT:    store i32 [[TMP75]], i32* [[DOTOMP_UB114]], align 4
29622 // CHECK15-NEXT:    store i32 0, i32* [[I115]], align 4
29623 // CHECK15-NEXT:    [[TMP76:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_108]], align 4
29624 // CHECK15-NEXT:    [[CMP116:%.*]] = icmp slt i32 0, [[TMP76]]
29625 // CHECK15-NEXT:    br i1 [[CMP116]], label [[SIMD_IF_THEN117:%.*]], label [[SIMD_IF_END137:%.*]]
29626 // CHECK15:       simd.if.then117:
29627 // CHECK15-NEXT:    [[TMP77:%.*]] = load i32, i32* [[DOTOMP_LB113]], align 4
29628 // CHECK15-NEXT:    store i32 [[TMP77]], i32* [[DOTOMP_IV118]], align 4
29629 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND120:%.*]]
29630 // CHECK15:       omp.inner.for.cond120:
29631 // CHECK15-NEXT:    [[TMP78:%.*]] = load i32, i32* [[DOTOMP_IV118]], align 4, !llvm.access.group !16
29632 // CHECK15-NEXT:    [[TMP79:%.*]] = load i32, i32* [[DOTOMP_UB114]], align 4, !llvm.access.group !16
29633 // CHECK15-NEXT:    [[CMP121:%.*]] = icmp sle i32 [[TMP78]], [[TMP79]]
29634 // CHECK15-NEXT:    br i1 [[CMP121]], label [[OMP_INNER_FOR_BODY122:%.*]], label [[OMP_INNER_FOR_END132:%.*]]
29635 // CHECK15:       omp.inner.for.body122:
29636 // CHECK15-NEXT:    [[TMP80:%.*]] = load i32, i32* [[DOTOMP_IV118]], align 4, !llvm.access.group !16
29637 // CHECK15-NEXT:    [[MUL123:%.*]] = mul nsw i32 [[TMP80]], 1
29638 // CHECK15-NEXT:    [[ADD124:%.*]] = add nsw i32 0, [[MUL123]]
29639 // CHECK15-NEXT:    store i32 [[ADD124]], i32* [[I119]], align 4, !llvm.access.group !16
29640 // CHECK15-NEXT:    [[TMP81:%.*]] = load double*, double** [[B]], align 4, !llvm.access.group !16
29641 // CHECK15-NEXT:    [[TMP82:%.*]] = load i32, i32* [[I119]], align 4, !llvm.access.group !16
29642 // CHECK15-NEXT:    [[ARRAYIDX125:%.*]] = getelementptr inbounds double, double* [[TMP81]], i32 [[TMP82]]
29643 // CHECK15-NEXT:    [[TMP83:%.*]] = load double, double* [[ARRAYIDX125]], align 4, !llvm.access.group !16
29644 // CHECK15-NEXT:    [[TMP84:%.*]] = load double*, double** [[C]], align 4, !llvm.access.group !16
29645 // CHECK15-NEXT:    [[TMP85:%.*]] = load i32, i32* [[I119]], align 4, !llvm.access.group !16
29646 // CHECK15-NEXT:    [[ARRAYIDX126:%.*]] = getelementptr inbounds double, double* [[TMP84]], i32 [[TMP85]]
29647 // CHECK15-NEXT:    [[TMP86:%.*]] = load double, double* [[ARRAYIDX126]], align 4, !llvm.access.group !16
29648 // CHECK15-NEXT:    [[ADD127:%.*]] = fadd double [[TMP83]], [[TMP86]]
29649 // CHECK15-NEXT:    [[TMP87:%.*]] = load double*, double** [[A]], align 4, !llvm.access.group !16
29650 // CHECK15-NEXT:    [[TMP88:%.*]] = load i32, i32* [[I119]], align 4, !llvm.access.group !16
29651 // CHECK15-NEXT:    [[ARRAYIDX128:%.*]] = getelementptr inbounds double, double* [[TMP87]], i32 [[TMP88]]
29652 // CHECK15-NEXT:    store double [[ADD127]], double* [[ARRAYIDX128]], align 4, !llvm.access.group !16
29653 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE129:%.*]]
29654 // CHECK15:       omp.body.continue129:
29655 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC130:%.*]]
29656 // CHECK15:       omp.inner.for.inc130:
29657 // CHECK15-NEXT:    [[TMP89:%.*]] = load i32, i32* [[DOTOMP_IV118]], align 4, !llvm.access.group !16
29658 // CHECK15-NEXT:    [[ADD131:%.*]] = add nsw i32 [[TMP89]], 1
29659 // CHECK15-NEXT:    store i32 [[ADD131]], i32* [[DOTOMP_IV118]], align 4, !llvm.access.group !16
29660 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND120]], !llvm.loop [[LOOP17:![0-9]+]]
29661 // CHECK15:       omp.inner.for.end132:
29662 // CHECK15-NEXT:    [[TMP90:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_108]], align 4
29663 // CHECK15-NEXT:    [[SUB133:%.*]] = sub nsw i32 [[TMP90]], 0
29664 // CHECK15-NEXT:    [[DIV134:%.*]] = sdiv i32 [[SUB133]], 1
29665 // CHECK15-NEXT:    [[MUL135:%.*]] = mul nsw i32 [[DIV134]], 1
29666 // CHECK15-NEXT:    [[ADD136:%.*]] = add nsw i32 0, [[MUL135]]
29667 // CHECK15-NEXT:    store i32 [[ADD136]], i32* [[I119]], align 4
29668 // CHECK15-NEXT:    br label [[SIMD_IF_END137]]
29669 // CHECK15:       simd.if.end137:
29670 // CHECK15-NEXT:    [[TMP91:%.*]] = load i32, i32* [[N]], align 4
29671 // CHECK15-NEXT:    store i32 [[TMP91]], i32* [[DOTCAPTURE_EXPR_139]], align 4
29672 // CHECK15-NEXT:    [[TMP92:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_139]], align 4
29673 // CHECK15-NEXT:    [[SUB141:%.*]] = sub nsw i32 [[TMP92]], 0
29674 // CHECK15-NEXT:    [[DIV142:%.*]] = sdiv i32 [[SUB141]], 1
29675 // CHECK15-NEXT:    [[SUB143:%.*]] = sub nsw i32 [[DIV142]], 1
29676 // CHECK15-NEXT:    store i32 [[SUB143]], i32* [[DOTCAPTURE_EXPR_140]], align 4
29677 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB144]], align 4
29678 // CHECK15-NEXT:    [[TMP93:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_140]], align 4
29679 // CHECK15-NEXT:    store i32 [[TMP93]], i32* [[DOTOMP_UB145]], align 4
29680 // CHECK15-NEXT:    store i32 0, i32* [[I146]], align 4
29681 // CHECK15-NEXT:    [[TMP94:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_139]], align 4
29682 // CHECK15-NEXT:    [[CMP147:%.*]] = icmp slt i32 0, [[TMP94]]
29683 // CHECK15-NEXT:    br i1 [[CMP147]], label [[SIMD_IF_THEN148:%.*]], label [[SIMD_IF_END168:%.*]]
29684 // CHECK15:       simd.if.then148:
29685 // CHECK15-NEXT:    [[TMP95:%.*]] = load i32, i32* [[DOTOMP_LB144]], align 4
29686 // CHECK15-NEXT:    store i32 [[TMP95]], i32* [[DOTOMP_IV149]], align 4
29687 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND151:%.*]]
29688 // CHECK15:       omp.inner.for.cond151:
29689 // CHECK15-NEXT:    [[TMP96:%.*]] = load i32, i32* [[DOTOMP_IV149]], align 4, !llvm.access.group !19
29690 // CHECK15-NEXT:    [[TMP97:%.*]] = load i32, i32* [[DOTOMP_UB145]], align 4, !llvm.access.group !19
29691 // CHECK15-NEXT:    [[CMP152:%.*]] = icmp sle i32 [[TMP96]], [[TMP97]]
29692 // CHECK15-NEXT:    br i1 [[CMP152]], label [[OMP_INNER_FOR_BODY153:%.*]], label [[OMP_INNER_FOR_END163:%.*]]
29693 // CHECK15:       omp.inner.for.body153:
29694 // CHECK15-NEXT:    [[TMP98:%.*]] = load i32, i32* [[DOTOMP_IV149]], align 4, !llvm.access.group !19
29695 // CHECK15-NEXT:    [[MUL154:%.*]] = mul nsw i32 [[TMP98]], 1
29696 // CHECK15-NEXT:    [[ADD155:%.*]] = add nsw i32 0, [[MUL154]]
29697 // CHECK15-NEXT:    store i32 [[ADD155]], i32* [[I150]], align 4, !llvm.access.group !19
29698 // CHECK15-NEXT:    [[TMP99:%.*]] = load double*, double** [[B]], align 4, !llvm.access.group !19
29699 // CHECK15-NEXT:    [[TMP100:%.*]] = load i32, i32* [[I150]], align 4, !llvm.access.group !19
29700 // CHECK15-NEXT:    [[ARRAYIDX156:%.*]] = getelementptr inbounds double, double* [[TMP99]], i32 [[TMP100]]
29701 // CHECK15-NEXT:    [[TMP101:%.*]] = load double, double* [[ARRAYIDX156]], align 4, !llvm.access.group !19
29702 // CHECK15-NEXT:    [[TMP102:%.*]] = load double*, double** [[C]], align 4, !llvm.access.group !19
29703 // CHECK15-NEXT:    [[TMP103:%.*]] = load i32, i32* [[I150]], align 4, !llvm.access.group !19
29704 // CHECK15-NEXT:    [[ARRAYIDX157:%.*]] = getelementptr inbounds double, double* [[TMP102]], i32 [[TMP103]]
29705 // CHECK15-NEXT:    [[TMP104:%.*]] = load double, double* [[ARRAYIDX157]], align 4, !llvm.access.group !19
29706 // CHECK15-NEXT:    [[ADD158:%.*]] = fadd double [[TMP101]], [[TMP104]]
29707 // CHECK15-NEXT:    [[TMP105:%.*]] = load double*, double** [[A]], align 4, !llvm.access.group !19
29708 // CHECK15-NEXT:    [[TMP106:%.*]] = load i32, i32* [[I150]], align 4, !llvm.access.group !19
29709 // CHECK15-NEXT:    [[ARRAYIDX159:%.*]] = getelementptr inbounds double, double* [[TMP105]], i32 [[TMP106]]
29710 // CHECK15-NEXT:    store double [[ADD158]], double* [[ARRAYIDX159]], align 4, !llvm.access.group !19
29711 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE160:%.*]]
29712 // CHECK15:       omp.body.continue160:
29713 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC161:%.*]]
29714 // CHECK15:       omp.inner.for.inc161:
29715 // CHECK15-NEXT:    [[TMP107:%.*]] = load i32, i32* [[DOTOMP_IV149]], align 4, !llvm.access.group !19
29716 // CHECK15-NEXT:    [[ADD162:%.*]] = add nsw i32 [[TMP107]], 1
29717 // CHECK15-NEXT:    store i32 [[ADD162]], i32* [[DOTOMP_IV149]], align 4, !llvm.access.group !19
29718 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND151]], !llvm.loop [[LOOP20:![0-9]+]]
29719 // CHECK15:       omp.inner.for.end163:
29720 // CHECK15-NEXT:    [[TMP108:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_139]], align 4
29721 // CHECK15-NEXT:    [[SUB164:%.*]] = sub nsw i32 [[TMP108]], 0
29722 // CHECK15-NEXT:    [[DIV165:%.*]] = sdiv i32 [[SUB164]], 1
29723 // CHECK15-NEXT:    [[MUL166:%.*]] = mul nsw i32 [[DIV165]], 1
29724 // CHECK15-NEXT:    [[ADD167:%.*]] = add nsw i32 0, [[MUL166]]
29725 // CHECK15-NEXT:    store i32 [[ADD167]], i32* [[I150]], align 4
29726 // CHECK15-NEXT:    br label [[SIMD_IF_END168]]
29727 // CHECK15:       simd.if.end168:
29728 // CHECK15-NEXT:    [[TMP109:%.*]] = load i32, i32* [[CH]], align 4
29729 // CHECK15-NEXT:    store i32 [[TMP109]], i32* [[DOTCAPTURE_EXPR_169]], align 4
29730 // CHECK15-NEXT:    [[TMP110:%.*]] = load i32, i32* [[N]], align 4
29731 // CHECK15-NEXT:    store i32 [[TMP110]], i32* [[DOTCAPTURE_EXPR_171]], align 4
29732 // CHECK15-NEXT:    [[TMP111:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_171]], align 4
29733 // CHECK15-NEXT:    [[SUB173:%.*]] = sub nsw i32 [[TMP111]], 0
29734 // CHECK15-NEXT:    [[DIV174:%.*]] = sdiv i32 [[SUB173]], 1
29735 // CHECK15-NEXT:    [[SUB175:%.*]] = sub nsw i32 [[DIV174]], 1
29736 // CHECK15-NEXT:    store i32 [[SUB175]], i32* [[DOTCAPTURE_EXPR_172]], align 4
29737 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB176]], align 4
29738 // CHECK15-NEXT:    [[TMP112:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_172]], align 4
29739 // CHECK15-NEXT:    store i32 [[TMP112]], i32* [[DOTOMP_UB177]], align 4
29740 // CHECK15-NEXT:    store i32 0, i32* [[I178]], align 4
29741 // CHECK15-NEXT:    [[TMP113:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_171]], align 4
29742 // CHECK15-NEXT:    [[CMP179:%.*]] = icmp slt i32 0, [[TMP113]]
29743 // CHECK15-NEXT:    br i1 [[CMP179]], label [[SIMD_IF_THEN180:%.*]], label [[SIMD_IF_END200:%.*]]
29744 // CHECK15:       simd.if.then180:
29745 // CHECK15-NEXT:    [[TMP114:%.*]] = load i32, i32* [[DOTOMP_LB176]], align 4
29746 // CHECK15-NEXT:    store i32 [[TMP114]], i32* [[DOTOMP_IV181]], align 4
29747 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND183:%.*]]
29748 // CHECK15:       omp.inner.for.cond183:
29749 // CHECK15-NEXT:    [[TMP115:%.*]] = load i32, i32* [[DOTOMP_IV181]], align 4, !llvm.access.group !22
29750 // CHECK15-NEXT:    [[TMP116:%.*]] = load i32, i32* [[DOTOMP_UB177]], align 4, !llvm.access.group !22
29751 // CHECK15-NEXT:    [[CMP184:%.*]] = icmp sle i32 [[TMP115]], [[TMP116]]
29752 // CHECK15-NEXT:    br i1 [[CMP184]], label [[OMP_INNER_FOR_BODY185:%.*]], label [[OMP_INNER_FOR_END195:%.*]]
29753 // CHECK15:       omp.inner.for.body185:
29754 // CHECK15-NEXT:    [[TMP117:%.*]] = load i32, i32* [[DOTOMP_IV181]], align 4, !llvm.access.group !22
29755 // CHECK15-NEXT:    [[MUL186:%.*]] = mul nsw i32 [[TMP117]], 1
29756 // CHECK15-NEXT:    [[ADD187:%.*]] = add nsw i32 0, [[MUL186]]
29757 // CHECK15-NEXT:    store i32 [[ADD187]], i32* [[I182]], align 4, !llvm.access.group !22
29758 // CHECK15-NEXT:    [[TMP118:%.*]] = load double*, double** [[B]], align 4, !llvm.access.group !22
29759 // CHECK15-NEXT:    [[TMP119:%.*]] = load i32, i32* [[I182]], align 4, !llvm.access.group !22
29760 // CHECK15-NEXT:    [[ARRAYIDX188:%.*]] = getelementptr inbounds double, double* [[TMP118]], i32 [[TMP119]]
29761 // CHECK15-NEXT:    [[TMP120:%.*]] = load double, double* [[ARRAYIDX188]], align 4, !llvm.access.group !22
29762 // CHECK15-NEXT:    [[TMP121:%.*]] = load double*, double** [[C]], align 4, !llvm.access.group !22
29763 // CHECK15-NEXT:    [[TMP122:%.*]] = load i32, i32* [[I182]], align 4, !llvm.access.group !22
29764 // CHECK15-NEXT:    [[ARRAYIDX189:%.*]] = getelementptr inbounds double, double* [[TMP121]], i32 [[TMP122]]
29765 // CHECK15-NEXT:    [[TMP123:%.*]] = load double, double* [[ARRAYIDX189]], align 4, !llvm.access.group !22
29766 // CHECK15-NEXT:    [[ADD190:%.*]] = fadd double [[TMP120]], [[TMP123]]
29767 // CHECK15-NEXT:    [[TMP124:%.*]] = load double*, double** [[A]], align 4, !llvm.access.group !22
29768 // CHECK15-NEXT:    [[TMP125:%.*]] = load i32, i32* [[I182]], align 4, !llvm.access.group !22
29769 // CHECK15-NEXT:    [[ARRAYIDX191:%.*]] = getelementptr inbounds double, double* [[TMP124]], i32 [[TMP125]]
29770 // CHECK15-NEXT:    store double [[ADD190]], double* [[ARRAYIDX191]], align 4, !llvm.access.group !22
29771 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE192:%.*]]
29772 // CHECK15:       omp.body.continue192:
29773 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC193:%.*]]
29774 // CHECK15:       omp.inner.for.inc193:
29775 // CHECK15-NEXT:    [[TMP126:%.*]] = load i32, i32* [[DOTOMP_IV181]], align 4, !llvm.access.group !22
29776 // CHECK15-NEXT:    [[ADD194:%.*]] = add nsw i32 [[TMP126]], 1
29777 // CHECK15-NEXT:    store i32 [[ADD194]], i32* [[DOTOMP_IV181]], align 4, !llvm.access.group !22
29778 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND183]], !llvm.loop [[LOOP23:![0-9]+]]
29779 // CHECK15:       omp.inner.for.end195:
29780 // CHECK15-NEXT:    [[TMP127:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_171]], align 4
29781 // CHECK15-NEXT:    [[SUB196:%.*]] = sub nsw i32 [[TMP127]], 0
29782 // CHECK15-NEXT:    [[DIV197:%.*]] = sdiv i32 [[SUB196]], 1
29783 // CHECK15-NEXT:    [[MUL198:%.*]] = mul nsw i32 [[DIV197]], 1
29784 // CHECK15-NEXT:    [[ADD199:%.*]] = add nsw i32 0, [[MUL198]]
29785 // CHECK15-NEXT:    store i32 [[ADD199]], i32* [[I182]], align 4
29786 // CHECK15-NEXT:    br label [[SIMD_IF_END200]]
29787 // CHECK15:       simd.if.end200:
29788 // CHECK15-NEXT:    [[CALL:%.*]] = call i32 @_Z5tmainIiET_v()
29789 // CHECK15-NEXT:    ret i32 [[CALL]]
29790 //
29791 //
29792 // CHECK15-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
29793 // CHECK15-SAME: () #[[ATTR1:[0-9]+]] comdat {
29794 // CHECK15-NEXT:  entry:
29795 // CHECK15-NEXT:    [[A:%.*]] = alloca i32*, align 4
29796 // CHECK15-NEXT:    [[B:%.*]] = alloca i32*, align 4
29797 // CHECK15-NEXT:    [[C:%.*]] = alloca i32*, align 4
29798 // CHECK15-NEXT:    [[N:%.*]] = alloca i32, align 4
29799 // CHECK15-NEXT:    [[CH:%.*]] = alloca i32, align 4
29800 // CHECK15-NEXT:    [[TMP:%.*]] = alloca i32, align 4
29801 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
29802 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
29803 // CHECK15-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
29804 // CHECK15-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
29805 // CHECK15-NEXT:    [[I:%.*]] = alloca i32, align 4
29806 // CHECK15-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
29807 // CHECK15-NEXT:    [[I3:%.*]] = alloca i32, align 4
29808 // CHECK15-NEXT:    [[_TMP13:%.*]] = alloca i32, align 4
29809 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_14:%.*]] = alloca i32, align 4
29810 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_15:%.*]] = alloca i32, align 4
29811 // CHECK15-NEXT:    [[DOTOMP_LB19:%.*]] = alloca i32, align 4
29812 // CHECK15-NEXT:    [[DOTOMP_UB20:%.*]] = alloca i32, align 4
29813 // CHECK15-NEXT:    [[I21:%.*]] = alloca i32, align 4
29814 // CHECK15-NEXT:    [[DOTOMP_IV24:%.*]] = alloca i32, align 4
29815 // CHECK15-NEXT:    [[I25:%.*]] = alloca i32, align 4
29816 // CHECK15-NEXT:    [[_TMP44:%.*]] = alloca i32, align 4
29817 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_45:%.*]] = alloca i32, align 4
29818 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_46:%.*]] = alloca i32, align 4
29819 // CHECK15-NEXT:    [[DOTOMP_LB50:%.*]] = alloca i32, align 4
29820 // CHECK15-NEXT:    [[DOTOMP_UB51:%.*]] = alloca i32, align 4
29821 // CHECK15-NEXT:    [[I52:%.*]] = alloca i32, align 4
29822 // CHECK15-NEXT:    [[DOTOMP_IV55:%.*]] = alloca i32, align 4
29823 // CHECK15-NEXT:    [[I56:%.*]] = alloca i32, align 4
29824 // CHECK15-NEXT:    [[_TMP75:%.*]] = alloca i32, align 4
29825 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_76:%.*]] = alloca i32, align 4
29826 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_77:%.*]] = alloca i32, align 4
29827 // CHECK15-NEXT:    [[DOTOMP_LB81:%.*]] = alloca i32, align 4
29828 // CHECK15-NEXT:    [[DOTOMP_UB82:%.*]] = alloca i32, align 4
29829 // CHECK15-NEXT:    [[I83:%.*]] = alloca i32, align 4
29830 // CHECK15-NEXT:    [[DOTOMP_IV86:%.*]] = alloca i32, align 4
29831 // CHECK15-NEXT:    [[I87:%.*]] = alloca i32, align 4
29832 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_106:%.*]] = alloca i32, align 4
29833 // CHECK15-NEXT:    [[_TMP107:%.*]] = alloca i32, align 4
29834 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_108:%.*]] = alloca i32, align 4
29835 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_109:%.*]] = alloca i32, align 4
29836 // CHECK15-NEXT:    [[DOTOMP_LB113:%.*]] = alloca i32, align 4
29837 // CHECK15-NEXT:    [[DOTOMP_UB114:%.*]] = alloca i32, align 4
29838 // CHECK15-NEXT:    [[I115:%.*]] = alloca i32, align 4
29839 // CHECK15-NEXT:    [[DOTOMP_IV118:%.*]] = alloca i32, align 4
29840 // CHECK15-NEXT:    [[I119:%.*]] = alloca i32, align 4
29841 // CHECK15-NEXT:    [[_TMP138:%.*]] = alloca i32, align 4
29842 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_139:%.*]] = alloca i32, align 4
29843 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_140:%.*]] = alloca i32, align 4
29844 // CHECK15-NEXT:    [[DOTOMP_LB144:%.*]] = alloca i32, align 4
29845 // CHECK15-NEXT:    [[DOTOMP_UB145:%.*]] = alloca i32, align 4
29846 // CHECK15-NEXT:    [[I146:%.*]] = alloca i32, align 4
29847 // CHECK15-NEXT:    [[DOTOMP_IV149:%.*]] = alloca i32, align 4
29848 // CHECK15-NEXT:    [[I150:%.*]] = alloca i32, align 4
29849 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_169:%.*]] = alloca i32, align 4
29850 // CHECK15-NEXT:    [[_TMP170:%.*]] = alloca i32, align 4
29851 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_171:%.*]] = alloca i32, align 4
29852 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_172:%.*]] = alloca i32, align 4
29853 // CHECK15-NEXT:    [[DOTOMP_LB176:%.*]] = alloca i32, align 4
29854 // CHECK15-NEXT:    [[DOTOMP_UB177:%.*]] = alloca i32, align 4
29855 // CHECK15-NEXT:    [[I178:%.*]] = alloca i32, align 4
29856 // CHECK15-NEXT:    [[DOTOMP_IV181:%.*]] = alloca i32, align 4
29857 // CHECK15-NEXT:    [[I182:%.*]] = alloca i32, align 4
29858 // CHECK15-NEXT:    store i32 10000, i32* [[N]], align 4
29859 // CHECK15-NEXT:    store i32 100, i32* [[CH]], align 4
29860 // CHECK15-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
29861 // CHECK15-NEXT:    store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
29862 // CHECK15-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
29863 // CHECK15-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP1]], 0
29864 // CHECK15-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
29865 // CHECK15-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
29866 // CHECK15-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
29867 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
29868 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
29869 // CHECK15-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
29870 // CHECK15-NEXT:    store i32 0, i32* [[I]], align 4
29871 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
29872 // CHECK15-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
29873 // CHECK15-NEXT:    br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
29874 // CHECK15:       simd.if.then:
29875 // CHECK15-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
29876 // CHECK15-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
29877 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
29878 // CHECK15:       omp.inner.for.cond:
29879 // CHECK15-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
29880 // CHECK15-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !25
29881 // CHECK15-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
29882 // CHECK15-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
29883 // CHECK15:       omp.inner.for.body:
29884 // CHECK15-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
29885 // CHECK15-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
29886 // CHECK15-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
29887 // CHECK15-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !25
29888 // CHECK15-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[B]], align 4, !llvm.access.group !25
29889 // CHECK15-NEXT:    [[TMP9:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !25
29890 // CHECK15-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP8]], i32 [[TMP9]]
29891 // CHECK15-NEXT:    [[TMP10:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !25
29892 // CHECK15-NEXT:    [[TMP11:%.*]] = load i32*, i32** [[C]], align 4, !llvm.access.group !25
29893 // CHECK15-NEXT:    [[TMP12:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !25
29894 // CHECK15-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, i32* [[TMP11]], i32 [[TMP12]]
29895 // CHECK15-NEXT:    [[TMP13:%.*]] = load i32, i32* [[ARRAYIDX5]], align 4, !llvm.access.group !25
29896 // CHECK15-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP10]], [[TMP13]]
29897 // CHECK15-NEXT:    [[TMP14:%.*]] = load i32*, i32** [[A]], align 4, !llvm.access.group !25
29898 // CHECK15-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !25
29899 // CHECK15-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i32, i32* [[TMP14]], i32 [[TMP15]]
29900 // CHECK15-NEXT:    store i32 [[ADD6]], i32* [[ARRAYIDX7]], align 4, !llvm.access.group !25
29901 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
29902 // CHECK15:       omp.body.continue:
29903 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
29904 // CHECK15:       omp.inner.for.inc:
29905 // CHECK15-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
29906 // CHECK15-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP16]], 1
29907 // CHECK15-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
29908 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
29909 // CHECK15:       omp.inner.for.end:
29910 // CHECK15-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
29911 // CHECK15-NEXT:    [[SUB9:%.*]] = sub nsw i32 [[TMP17]], 0
29912 // CHECK15-NEXT:    [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
29913 // CHECK15-NEXT:    [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
29914 // CHECK15-NEXT:    [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
29915 // CHECK15-NEXT:    store i32 [[ADD12]], i32* [[I3]], align 4
29916 // CHECK15-NEXT:    br label [[SIMD_IF_END]]
29917 // CHECK15:       simd.if.end:
29918 // CHECK15-NEXT:    [[TMP18:%.*]] = load i32, i32* [[N]], align 4
29919 // CHECK15-NEXT:    store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR_14]], align 4
29920 // CHECK15-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_14]], align 4
29921 // CHECK15-NEXT:    [[SUB16:%.*]] = sub nsw i32 [[TMP19]], 0
29922 // CHECK15-NEXT:    [[DIV17:%.*]] = sdiv i32 [[SUB16]], 1
29923 // CHECK15-NEXT:    [[SUB18:%.*]] = sub nsw i32 [[DIV17]], 1
29924 // CHECK15-NEXT:    store i32 [[SUB18]], i32* [[DOTCAPTURE_EXPR_15]], align 4
29925 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB19]], align 4
29926 // CHECK15-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_15]], align 4
29927 // CHECK15-NEXT:    store i32 [[TMP20]], i32* [[DOTOMP_UB20]], align 4
29928 // CHECK15-NEXT:    store i32 0, i32* [[I21]], align 4
29929 // CHECK15-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_14]], align 4
29930 // CHECK15-NEXT:    [[CMP22:%.*]] = icmp slt i32 0, [[TMP21]]
29931 // CHECK15-NEXT:    br i1 [[CMP22]], label [[SIMD_IF_THEN23:%.*]], label [[SIMD_IF_END43:%.*]]
29932 // CHECK15:       simd.if.then23:
29933 // CHECK15-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_LB19]], align 4
29934 // CHECK15-NEXT:    store i32 [[TMP22]], i32* [[DOTOMP_IV24]], align 4
29935 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND26:%.*]]
29936 // CHECK15:       omp.inner.for.cond26:
29937 // CHECK15-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV24]], align 4, !llvm.access.group !28
29938 // CHECK15-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_UB20]], align 4, !llvm.access.group !28
29939 // CHECK15-NEXT:    [[CMP27:%.*]] = icmp sle i32 [[TMP23]], [[TMP24]]
29940 // CHECK15-NEXT:    br i1 [[CMP27]], label [[OMP_INNER_FOR_BODY28:%.*]], label [[OMP_INNER_FOR_END38:%.*]]
29941 // CHECK15:       omp.inner.for.body28:
29942 // CHECK15-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV24]], align 4, !llvm.access.group !28
29943 // CHECK15-NEXT:    [[MUL29:%.*]] = mul nsw i32 [[TMP25]], 1
29944 // CHECK15-NEXT:    [[ADD30:%.*]] = add nsw i32 0, [[MUL29]]
29945 // CHECK15-NEXT:    store i32 [[ADD30]], i32* [[I25]], align 4, !llvm.access.group !28
29946 // CHECK15-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[B]], align 4, !llvm.access.group !28
29947 // CHECK15-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I25]], align 4, !llvm.access.group !28
29948 // CHECK15-NEXT:    [[ARRAYIDX31:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i32 [[TMP27]]
29949 // CHECK15-NEXT:    [[TMP28:%.*]] = load i32, i32* [[ARRAYIDX31]], align 4, !llvm.access.group !28
29950 // CHECK15-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[C]], align 4, !llvm.access.group !28
29951 // CHECK15-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I25]], align 4, !llvm.access.group !28
29952 // CHECK15-NEXT:    [[ARRAYIDX32:%.*]] = getelementptr inbounds i32, i32* [[TMP29]], i32 [[TMP30]]
29953 // CHECK15-NEXT:    [[TMP31:%.*]] = load i32, i32* [[ARRAYIDX32]], align 4, !llvm.access.group !28
29954 // CHECK15-NEXT:    [[ADD33:%.*]] = add nsw i32 [[TMP28]], [[TMP31]]
29955 // CHECK15-NEXT:    [[TMP32:%.*]] = load i32*, i32** [[A]], align 4, !llvm.access.group !28
29956 // CHECK15-NEXT:    [[TMP33:%.*]] = load i32, i32* [[I25]], align 4, !llvm.access.group !28
29957 // CHECK15-NEXT:    [[ARRAYIDX34:%.*]] = getelementptr inbounds i32, i32* [[TMP32]], i32 [[TMP33]]
29958 // CHECK15-NEXT:    store i32 [[ADD33]], i32* [[ARRAYIDX34]], align 4, !llvm.access.group !28
29959 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE35:%.*]]
29960 // CHECK15:       omp.body.continue35:
29961 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC36:%.*]]
29962 // CHECK15:       omp.inner.for.inc36:
29963 // CHECK15-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV24]], align 4, !llvm.access.group !28
29964 // CHECK15-NEXT:    [[ADD37:%.*]] = add nsw i32 [[TMP34]], 1
29965 // CHECK15-NEXT:    store i32 [[ADD37]], i32* [[DOTOMP_IV24]], align 4, !llvm.access.group !28
29966 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND26]], !llvm.loop [[LOOP29:![0-9]+]]
29967 // CHECK15:       omp.inner.for.end38:
29968 // CHECK15-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_14]], align 4
29969 // CHECK15-NEXT:    [[SUB39:%.*]] = sub nsw i32 [[TMP35]], 0
29970 // CHECK15-NEXT:    [[DIV40:%.*]] = sdiv i32 [[SUB39]], 1
29971 // CHECK15-NEXT:    [[MUL41:%.*]] = mul nsw i32 [[DIV40]], 1
29972 // CHECK15-NEXT:    [[ADD42:%.*]] = add nsw i32 0, [[MUL41]]
29973 // CHECK15-NEXT:    store i32 [[ADD42]], i32* [[I25]], align 4
29974 // CHECK15-NEXT:    br label [[SIMD_IF_END43]]
29975 // CHECK15:       simd.if.end43:
29976 // CHECK15-NEXT:    [[TMP36:%.*]] = load i32, i32* [[N]], align 4
29977 // CHECK15-NEXT:    store i32 [[TMP36]], i32* [[DOTCAPTURE_EXPR_45]], align 4
29978 // CHECK15-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_45]], align 4
29979 // CHECK15-NEXT:    [[SUB47:%.*]] = sub nsw i32 [[TMP37]], 0
29980 // CHECK15-NEXT:    [[DIV48:%.*]] = sdiv i32 [[SUB47]], 1
29981 // CHECK15-NEXT:    [[SUB49:%.*]] = sub nsw i32 [[DIV48]], 1
29982 // CHECK15-NEXT:    store i32 [[SUB49]], i32* [[DOTCAPTURE_EXPR_46]], align 4
29983 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB50]], align 4
29984 // CHECK15-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_46]], align 4
29985 // CHECK15-NEXT:    store i32 [[TMP38]], i32* [[DOTOMP_UB51]], align 4
29986 // CHECK15-NEXT:    store i32 0, i32* [[I52]], align 4
29987 // CHECK15-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_45]], align 4
29988 // CHECK15-NEXT:    [[CMP53:%.*]] = icmp slt i32 0, [[TMP39]]
29989 // CHECK15-NEXT:    br i1 [[CMP53]], label [[SIMD_IF_THEN54:%.*]], label [[SIMD_IF_END74:%.*]]
29990 // CHECK15:       simd.if.then54:
29991 // CHECK15-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_LB50]], align 4
29992 // CHECK15-NEXT:    store i32 [[TMP40]], i32* [[DOTOMP_IV55]], align 4
29993 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND57:%.*]]
29994 // CHECK15:       omp.inner.for.cond57:
29995 // CHECK15-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_IV55]], align 4, !llvm.access.group !31
29996 // CHECK15-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_UB51]], align 4, !llvm.access.group !31
29997 // CHECK15-NEXT:    [[CMP58:%.*]] = icmp sle i32 [[TMP41]], [[TMP42]]
29998 // CHECK15-NEXT:    br i1 [[CMP58]], label [[OMP_INNER_FOR_BODY59:%.*]], label [[OMP_INNER_FOR_END69:%.*]]
29999 // CHECK15:       omp.inner.for.body59:
30000 // CHECK15-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV55]], align 4, !llvm.access.group !31
30001 // CHECK15-NEXT:    [[MUL60:%.*]] = mul nsw i32 [[TMP43]], 1
30002 // CHECK15-NEXT:    [[ADD61:%.*]] = add nsw i32 0, [[MUL60]]
30003 // CHECK15-NEXT:    store i32 [[ADD61]], i32* [[I56]], align 4, !llvm.access.group !31
30004 // CHECK15-NEXT:    [[TMP44:%.*]] = load i32*, i32** [[B]], align 4, !llvm.access.group !31
30005 // CHECK15-NEXT:    [[TMP45:%.*]] = load i32, i32* [[I56]], align 4, !llvm.access.group !31
30006 // CHECK15-NEXT:    [[ARRAYIDX62:%.*]] = getelementptr inbounds i32, i32* [[TMP44]], i32 [[TMP45]]
30007 // CHECK15-NEXT:    [[TMP46:%.*]] = load i32, i32* [[ARRAYIDX62]], align 4, !llvm.access.group !31
30008 // CHECK15-NEXT:    [[TMP47:%.*]] = load i32*, i32** [[C]], align 4, !llvm.access.group !31
30009 // CHECK15-NEXT:    [[TMP48:%.*]] = load i32, i32* [[I56]], align 4, !llvm.access.group !31
30010 // CHECK15-NEXT:    [[ARRAYIDX63:%.*]] = getelementptr inbounds i32, i32* [[TMP47]], i32 [[TMP48]]
30011 // CHECK15-NEXT:    [[TMP49:%.*]] = load i32, i32* [[ARRAYIDX63]], align 4, !llvm.access.group !31
30012 // CHECK15-NEXT:    [[ADD64:%.*]] = add nsw i32 [[TMP46]], [[TMP49]]
30013 // CHECK15-NEXT:    [[TMP50:%.*]] = load i32*, i32** [[A]], align 4, !llvm.access.group !31
30014 // CHECK15-NEXT:    [[TMP51:%.*]] = load i32, i32* [[I56]], align 4, !llvm.access.group !31
30015 // CHECK15-NEXT:    [[ARRAYIDX65:%.*]] = getelementptr inbounds i32, i32* [[TMP50]], i32 [[TMP51]]
30016 // CHECK15-NEXT:    store i32 [[ADD64]], i32* [[ARRAYIDX65]], align 4, !llvm.access.group !31
30017 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE66:%.*]]
30018 // CHECK15:       omp.body.continue66:
30019 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC67:%.*]]
30020 // CHECK15:       omp.inner.for.inc67:
30021 // CHECK15-NEXT:    [[TMP52:%.*]] = load i32, i32* [[DOTOMP_IV55]], align 4, !llvm.access.group !31
30022 // CHECK15-NEXT:    [[ADD68:%.*]] = add nsw i32 [[TMP52]], 1
30023 // CHECK15-NEXT:    store i32 [[ADD68]], i32* [[DOTOMP_IV55]], align 4, !llvm.access.group !31
30024 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND57]], !llvm.loop [[LOOP32:![0-9]+]]
30025 // CHECK15:       omp.inner.for.end69:
30026 // CHECK15-NEXT:    [[TMP53:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_45]], align 4
30027 // CHECK15-NEXT:    [[SUB70:%.*]] = sub nsw i32 [[TMP53]], 0
30028 // CHECK15-NEXT:    [[DIV71:%.*]] = sdiv i32 [[SUB70]], 1
30029 // CHECK15-NEXT:    [[MUL72:%.*]] = mul nsw i32 [[DIV71]], 1
30030 // CHECK15-NEXT:    [[ADD73:%.*]] = add nsw i32 0, [[MUL72]]
30031 // CHECK15-NEXT:    store i32 [[ADD73]], i32* [[I56]], align 4
30032 // CHECK15-NEXT:    br label [[SIMD_IF_END74]]
30033 // CHECK15:       simd.if.end74:
30034 // CHECK15-NEXT:    [[TMP54:%.*]] = load i32, i32* [[N]], align 4
30035 // CHECK15-NEXT:    store i32 [[TMP54]], i32* [[DOTCAPTURE_EXPR_76]], align 4
30036 // CHECK15-NEXT:    [[TMP55:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_76]], align 4
30037 // CHECK15-NEXT:    [[SUB78:%.*]] = sub nsw i32 [[TMP55]], 0
30038 // CHECK15-NEXT:    [[DIV79:%.*]] = sdiv i32 [[SUB78]], 1
30039 // CHECK15-NEXT:    [[SUB80:%.*]] = sub nsw i32 [[DIV79]], 1
30040 // CHECK15-NEXT:    store i32 [[SUB80]], i32* [[DOTCAPTURE_EXPR_77]], align 4
30041 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB81]], align 4
30042 // CHECK15-NEXT:    [[TMP56:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_77]], align 4
30043 // CHECK15-NEXT:    store i32 [[TMP56]], i32* [[DOTOMP_UB82]], align 4
30044 // CHECK15-NEXT:    store i32 0, i32* [[I83]], align 4
30045 // CHECK15-NEXT:    [[TMP57:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_76]], align 4
30046 // CHECK15-NEXT:    [[CMP84:%.*]] = icmp slt i32 0, [[TMP57]]
30047 // CHECK15-NEXT:    br i1 [[CMP84]], label [[SIMD_IF_THEN85:%.*]], label [[SIMD_IF_END105:%.*]]
30048 // CHECK15:       simd.if.then85:
30049 // CHECK15-NEXT:    [[TMP58:%.*]] = load i32, i32* [[DOTOMP_LB81]], align 4
30050 // CHECK15-NEXT:    store i32 [[TMP58]], i32* [[DOTOMP_IV86]], align 4
30051 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND88:%.*]]
30052 // CHECK15:       omp.inner.for.cond88:
30053 // CHECK15-NEXT:    [[TMP59:%.*]] = load i32, i32* [[DOTOMP_IV86]], align 4, !llvm.access.group !34
30054 // CHECK15-NEXT:    [[TMP60:%.*]] = load i32, i32* [[DOTOMP_UB82]], align 4, !llvm.access.group !34
30055 // CHECK15-NEXT:    [[CMP89:%.*]] = icmp sle i32 [[TMP59]], [[TMP60]]
30056 // CHECK15-NEXT:    br i1 [[CMP89]], label [[OMP_INNER_FOR_BODY90:%.*]], label [[OMP_INNER_FOR_END100:%.*]]
30057 // CHECK15:       omp.inner.for.body90:
30058 // CHECK15-NEXT:    [[TMP61:%.*]] = load i32, i32* [[DOTOMP_IV86]], align 4, !llvm.access.group !34
30059 // CHECK15-NEXT:    [[MUL91:%.*]] = mul nsw i32 [[TMP61]], 1
30060 // CHECK15-NEXT:    [[ADD92:%.*]] = add nsw i32 0, [[MUL91]]
30061 // CHECK15-NEXT:    store i32 [[ADD92]], i32* [[I87]], align 4, !llvm.access.group !34
30062 // CHECK15-NEXT:    [[TMP62:%.*]] = load i32*, i32** [[B]], align 4, !llvm.access.group !34
30063 // CHECK15-NEXT:    [[TMP63:%.*]] = load i32, i32* [[I87]], align 4, !llvm.access.group !34
30064 // CHECK15-NEXT:    [[ARRAYIDX93:%.*]] = getelementptr inbounds i32, i32* [[TMP62]], i32 [[TMP63]]
30065 // CHECK15-NEXT:    [[TMP64:%.*]] = load i32, i32* [[ARRAYIDX93]], align 4, !llvm.access.group !34
30066 // CHECK15-NEXT:    [[TMP65:%.*]] = load i32*, i32** [[C]], align 4, !llvm.access.group !34
30067 // CHECK15-NEXT:    [[TMP66:%.*]] = load i32, i32* [[I87]], align 4, !llvm.access.group !34
30068 // CHECK15-NEXT:    [[ARRAYIDX94:%.*]] = getelementptr inbounds i32, i32* [[TMP65]], i32 [[TMP66]]
30069 // CHECK15-NEXT:    [[TMP67:%.*]] = load i32, i32* [[ARRAYIDX94]], align 4, !llvm.access.group !34
30070 // CHECK15-NEXT:    [[ADD95:%.*]] = add nsw i32 [[TMP64]], [[TMP67]]
30071 // CHECK15-NEXT:    [[TMP68:%.*]] = load i32*, i32** [[A]], align 4, !llvm.access.group !34
30072 // CHECK15-NEXT:    [[TMP69:%.*]] = load i32, i32* [[I87]], align 4, !llvm.access.group !34
30073 // CHECK15-NEXT:    [[ARRAYIDX96:%.*]] = getelementptr inbounds i32, i32* [[TMP68]], i32 [[TMP69]]
30074 // CHECK15-NEXT:    store i32 [[ADD95]], i32* [[ARRAYIDX96]], align 4, !llvm.access.group !34
30075 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE97:%.*]]
30076 // CHECK15:       omp.body.continue97:
30077 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC98:%.*]]
30078 // CHECK15:       omp.inner.for.inc98:
30079 // CHECK15-NEXT:    [[TMP70:%.*]] = load i32, i32* [[DOTOMP_IV86]], align 4, !llvm.access.group !34
30080 // CHECK15-NEXT:    [[ADD99:%.*]] = add nsw i32 [[TMP70]], 1
30081 // CHECK15-NEXT:    store i32 [[ADD99]], i32* [[DOTOMP_IV86]], align 4, !llvm.access.group !34
30082 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND88]], !llvm.loop [[LOOP35:![0-9]+]]
30083 // CHECK15:       omp.inner.for.end100:
30084 // CHECK15-NEXT:    [[TMP71:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_76]], align 4
30085 // CHECK15-NEXT:    [[SUB101:%.*]] = sub nsw i32 [[TMP71]], 0
30086 // CHECK15-NEXT:    [[DIV102:%.*]] = sdiv i32 [[SUB101]], 1
30087 // CHECK15-NEXT:    [[MUL103:%.*]] = mul nsw i32 [[DIV102]], 1
30088 // CHECK15-NEXT:    [[ADD104:%.*]] = add nsw i32 0, [[MUL103]]
30089 // CHECK15-NEXT:    store i32 [[ADD104]], i32* [[I87]], align 4
30090 // CHECK15-NEXT:    br label [[SIMD_IF_END105]]
30091 // CHECK15:       simd.if.end105:
30092 // CHECK15-NEXT:    [[TMP72:%.*]] = load i32, i32* [[CH]], align 4
30093 // CHECK15-NEXT:    store i32 [[TMP72]], i32* [[DOTCAPTURE_EXPR_106]], align 4
30094 // CHECK15-NEXT:    [[TMP73:%.*]] = load i32, i32* [[N]], align 4
30095 // CHECK15-NEXT:    store i32 [[TMP73]], i32* [[DOTCAPTURE_EXPR_108]], align 4
30096 // CHECK15-NEXT:    [[TMP74:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_108]], align 4
30097 // CHECK15-NEXT:    [[SUB110:%.*]] = sub nsw i32 [[TMP74]], 0
30098 // CHECK15-NEXT:    [[DIV111:%.*]] = sdiv i32 [[SUB110]], 1
30099 // CHECK15-NEXT:    [[SUB112:%.*]] = sub nsw i32 [[DIV111]], 1
30100 // CHECK15-NEXT:    store i32 [[SUB112]], i32* [[DOTCAPTURE_EXPR_109]], align 4
30101 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB113]], align 4
30102 // CHECK15-NEXT:    [[TMP75:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_109]], align 4
30103 // CHECK15-NEXT:    store i32 [[TMP75]], i32* [[DOTOMP_UB114]], align 4
30104 // CHECK15-NEXT:    store i32 0, i32* [[I115]], align 4
30105 // CHECK15-NEXT:    [[TMP76:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_108]], align 4
30106 // CHECK15-NEXT:    [[CMP116:%.*]] = icmp slt i32 0, [[TMP76]]
30107 // CHECK15-NEXT:    br i1 [[CMP116]], label [[SIMD_IF_THEN117:%.*]], label [[SIMD_IF_END137:%.*]]
30108 // CHECK15:       simd.if.then117:
30109 // CHECK15-NEXT:    [[TMP77:%.*]] = load i32, i32* [[DOTOMP_LB113]], align 4
30110 // CHECK15-NEXT:    store i32 [[TMP77]], i32* [[DOTOMP_IV118]], align 4
30111 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND120:%.*]]
30112 // CHECK15:       omp.inner.for.cond120:
30113 // CHECK15-NEXT:    [[TMP78:%.*]] = load i32, i32* [[DOTOMP_IV118]], align 4, !llvm.access.group !37
30114 // CHECK15-NEXT:    [[TMP79:%.*]] = load i32, i32* [[DOTOMP_UB114]], align 4, !llvm.access.group !37
30115 // CHECK15-NEXT:    [[CMP121:%.*]] = icmp sle i32 [[TMP78]], [[TMP79]]
30116 // CHECK15-NEXT:    br i1 [[CMP121]], label [[OMP_INNER_FOR_BODY122:%.*]], label [[OMP_INNER_FOR_END132:%.*]]
30117 // CHECK15:       omp.inner.for.body122:
30118 // CHECK15-NEXT:    [[TMP80:%.*]] = load i32, i32* [[DOTOMP_IV118]], align 4, !llvm.access.group !37
30119 // CHECK15-NEXT:    [[MUL123:%.*]] = mul nsw i32 [[TMP80]], 1
30120 // CHECK15-NEXT:    [[ADD124:%.*]] = add nsw i32 0, [[MUL123]]
30121 // CHECK15-NEXT:    store i32 [[ADD124]], i32* [[I119]], align 4, !llvm.access.group !37
30122 // CHECK15-NEXT:    [[TMP81:%.*]] = load i32*, i32** [[B]], align 4, !llvm.access.group !37
30123 // CHECK15-NEXT:    [[TMP82:%.*]] = load i32, i32* [[I119]], align 4, !llvm.access.group !37
30124 // CHECK15-NEXT:    [[ARRAYIDX125:%.*]] = getelementptr inbounds i32, i32* [[TMP81]], i32 [[TMP82]]
30125 // CHECK15-NEXT:    [[TMP83:%.*]] = load i32, i32* [[ARRAYIDX125]], align 4, !llvm.access.group !37
30126 // CHECK15-NEXT:    [[TMP84:%.*]] = load i32*, i32** [[C]], align 4, !llvm.access.group !37
30127 // CHECK15-NEXT:    [[TMP85:%.*]] = load i32, i32* [[I119]], align 4, !llvm.access.group !37
30128 // CHECK15-NEXT:    [[ARRAYIDX126:%.*]] = getelementptr inbounds i32, i32* [[TMP84]], i32 [[TMP85]]
30129 // CHECK15-NEXT:    [[TMP86:%.*]] = load i32, i32* [[ARRAYIDX126]], align 4, !llvm.access.group !37
30130 // CHECK15-NEXT:    [[ADD127:%.*]] = add nsw i32 [[TMP83]], [[TMP86]]
30131 // CHECK15-NEXT:    [[TMP87:%.*]] = load i32*, i32** [[A]], align 4, !llvm.access.group !37
30132 // CHECK15-NEXT:    [[TMP88:%.*]] = load i32, i32* [[I119]], align 4, !llvm.access.group !37
30133 // CHECK15-NEXT:    [[ARRAYIDX128:%.*]] = getelementptr inbounds i32, i32* [[TMP87]], i32 [[TMP88]]
30134 // CHECK15-NEXT:    store i32 [[ADD127]], i32* [[ARRAYIDX128]], align 4, !llvm.access.group !37
30135 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE129:%.*]]
30136 // CHECK15:       omp.body.continue129:
30137 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC130:%.*]]
30138 // CHECK15:       omp.inner.for.inc130:
30139 // CHECK15-NEXT:    [[TMP89:%.*]] = load i32, i32* [[DOTOMP_IV118]], align 4, !llvm.access.group !37
30140 // CHECK15-NEXT:    [[ADD131:%.*]] = add nsw i32 [[TMP89]], 1
30141 // CHECK15-NEXT:    store i32 [[ADD131]], i32* [[DOTOMP_IV118]], align 4, !llvm.access.group !37
30142 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND120]], !llvm.loop [[LOOP38:![0-9]+]]
30143 // CHECK15:       omp.inner.for.end132:
30144 // CHECK15-NEXT:    [[TMP90:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_108]], align 4
30145 // CHECK15-NEXT:    [[SUB133:%.*]] = sub nsw i32 [[TMP90]], 0
30146 // CHECK15-NEXT:    [[DIV134:%.*]] = sdiv i32 [[SUB133]], 1
30147 // CHECK15-NEXT:    [[MUL135:%.*]] = mul nsw i32 [[DIV134]], 1
30148 // CHECK15-NEXT:    [[ADD136:%.*]] = add nsw i32 0, [[MUL135]]
30149 // CHECK15-NEXT:    store i32 [[ADD136]], i32* [[I119]], align 4
30150 // CHECK15-NEXT:    br label [[SIMD_IF_END137]]
30151 // CHECK15:       simd.if.end137:
30152 // CHECK15-NEXT:    [[TMP91:%.*]] = load i32, i32* [[N]], align 4
30153 // CHECK15-NEXT:    store i32 [[TMP91]], i32* [[DOTCAPTURE_EXPR_139]], align 4
30154 // CHECK15-NEXT:    [[TMP92:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_139]], align 4
30155 // CHECK15-NEXT:    [[SUB141:%.*]] = sub nsw i32 [[TMP92]], 0
30156 // CHECK15-NEXT:    [[DIV142:%.*]] = sdiv i32 [[SUB141]], 1
30157 // CHECK15-NEXT:    [[SUB143:%.*]] = sub nsw i32 [[DIV142]], 1
30158 // CHECK15-NEXT:    store i32 [[SUB143]], i32* [[DOTCAPTURE_EXPR_140]], align 4
30159 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB144]], align 4
30160 // CHECK15-NEXT:    [[TMP93:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_140]], align 4
30161 // CHECK15-NEXT:    store i32 [[TMP93]], i32* [[DOTOMP_UB145]], align 4
30162 // CHECK15-NEXT:    store i32 0, i32* [[I146]], align 4
30163 // CHECK15-NEXT:    [[TMP94:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_139]], align 4
30164 // CHECK15-NEXT:    [[CMP147:%.*]] = icmp slt i32 0, [[TMP94]]
30165 // CHECK15-NEXT:    br i1 [[CMP147]], label [[SIMD_IF_THEN148:%.*]], label [[SIMD_IF_END168:%.*]]
30166 // CHECK15:       simd.if.then148:
30167 // CHECK15-NEXT:    [[TMP95:%.*]] = load i32, i32* [[DOTOMP_LB144]], align 4
30168 // CHECK15-NEXT:    store i32 [[TMP95]], i32* [[DOTOMP_IV149]], align 4
30169 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND151:%.*]]
30170 // CHECK15:       omp.inner.for.cond151:
30171 // CHECK15-NEXT:    [[TMP96:%.*]] = load i32, i32* [[DOTOMP_IV149]], align 4, !llvm.access.group !40
30172 // CHECK15-NEXT:    [[TMP97:%.*]] = load i32, i32* [[DOTOMP_UB145]], align 4, !llvm.access.group !40
30173 // CHECK15-NEXT:    [[CMP152:%.*]] = icmp sle i32 [[TMP96]], [[TMP97]]
30174 // CHECK15-NEXT:    br i1 [[CMP152]], label [[OMP_INNER_FOR_BODY153:%.*]], label [[OMP_INNER_FOR_END163:%.*]]
30175 // CHECK15:       omp.inner.for.body153:
30176 // CHECK15-NEXT:    [[TMP98:%.*]] = load i32, i32* [[DOTOMP_IV149]], align 4, !llvm.access.group !40
30177 // CHECK15-NEXT:    [[MUL154:%.*]] = mul nsw i32 [[TMP98]], 1
30178 // CHECK15-NEXT:    [[ADD155:%.*]] = add nsw i32 0, [[MUL154]]
30179 // CHECK15-NEXT:    store i32 [[ADD155]], i32* [[I150]], align 4, !llvm.access.group !40
30180 // CHECK15-NEXT:    [[TMP99:%.*]] = load i32*, i32** [[B]], align 4, !llvm.access.group !40
30181 // CHECK15-NEXT:    [[TMP100:%.*]] = load i32, i32* [[I150]], align 4, !llvm.access.group !40
30182 // CHECK15-NEXT:    [[ARRAYIDX156:%.*]] = getelementptr inbounds i32, i32* [[TMP99]], i32 [[TMP100]]
30183 // CHECK15-NEXT:    [[TMP101:%.*]] = load i32, i32* [[ARRAYIDX156]], align 4, !llvm.access.group !40
30184 // CHECK15-NEXT:    [[TMP102:%.*]] = load i32*, i32** [[C]], align 4, !llvm.access.group !40
30185 // CHECK15-NEXT:    [[TMP103:%.*]] = load i32, i32* [[I150]], align 4, !llvm.access.group !40
30186 // CHECK15-NEXT:    [[ARRAYIDX157:%.*]] = getelementptr inbounds i32, i32* [[TMP102]], i32 [[TMP103]]
30187 // CHECK15-NEXT:    [[TMP104:%.*]] = load i32, i32* [[ARRAYIDX157]], align 4, !llvm.access.group !40
30188 // CHECK15-NEXT:    [[ADD158:%.*]] = add nsw i32 [[TMP101]], [[TMP104]]
30189 // CHECK15-NEXT:    [[TMP105:%.*]] = load i32*, i32** [[A]], align 4, !llvm.access.group !40
30190 // CHECK15-NEXT:    [[TMP106:%.*]] = load i32, i32* [[I150]], align 4, !llvm.access.group !40
30191 // CHECK15-NEXT:    [[ARRAYIDX159:%.*]] = getelementptr inbounds i32, i32* [[TMP105]], i32 [[TMP106]]
30192 // CHECK15-NEXT:    store i32 [[ADD158]], i32* [[ARRAYIDX159]], align 4, !llvm.access.group !40
30193 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE160:%.*]]
30194 // CHECK15:       omp.body.continue160:
30195 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC161:%.*]]
30196 // CHECK15:       omp.inner.for.inc161:
30197 // CHECK15-NEXT:    [[TMP107:%.*]] = load i32, i32* [[DOTOMP_IV149]], align 4, !llvm.access.group !40
30198 // CHECK15-NEXT:    [[ADD162:%.*]] = add nsw i32 [[TMP107]], 1
30199 // CHECK15-NEXT:    store i32 [[ADD162]], i32* [[DOTOMP_IV149]], align 4, !llvm.access.group !40
30200 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND151]], !llvm.loop [[LOOP41:![0-9]+]]
30201 // CHECK15:       omp.inner.for.end163:
30202 // CHECK15-NEXT:    [[TMP108:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_139]], align 4
30203 // CHECK15-NEXT:    [[SUB164:%.*]] = sub nsw i32 [[TMP108]], 0
30204 // CHECK15-NEXT:    [[DIV165:%.*]] = sdiv i32 [[SUB164]], 1
30205 // CHECK15-NEXT:    [[MUL166:%.*]] = mul nsw i32 [[DIV165]], 1
30206 // CHECK15-NEXT:    [[ADD167:%.*]] = add nsw i32 0, [[MUL166]]
30207 // CHECK15-NEXT:    store i32 [[ADD167]], i32* [[I150]], align 4
30208 // CHECK15-NEXT:    br label [[SIMD_IF_END168]]
30209 // CHECK15:       simd.if.end168:
30210 // CHECK15-NEXT:    [[TMP109:%.*]] = load i32, i32* [[CH]], align 4
30211 // CHECK15-NEXT:    store i32 [[TMP109]], i32* [[DOTCAPTURE_EXPR_169]], align 4
30212 // CHECK15-NEXT:    [[TMP110:%.*]] = load i32, i32* [[N]], align 4
30213 // CHECK15-NEXT:    store i32 [[TMP110]], i32* [[DOTCAPTURE_EXPR_171]], align 4
30214 // CHECK15-NEXT:    [[TMP111:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_171]], align 4
30215 // CHECK15-NEXT:    [[SUB173:%.*]] = sub nsw i32 [[TMP111]], 0
30216 // CHECK15-NEXT:    [[DIV174:%.*]] = sdiv i32 [[SUB173]], 1
30217 // CHECK15-NEXT:    [[SUB175:%.*]] = sub nsw i32 [[DIV174]], 1
30218 // CHECK15-NEXT:    store i32 [[SUB175]], i32* [[DOTCAPTURE_EXPR_172]], align 4
30219 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB176]], align 4
30220 // CHECK15-NEXT:    [[TMP112:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_172]], align 4
30221 // CHECK15-NEXT:    store i32 [[TMP112]], i32* [[DOTOMP_UB177]], align 4
30222 // CHECK15-NEXT:    store i32 0, i32* [[I178]], align 4
30223 // CHECK15-NEXT:    [[TMP113:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_171]], align 4
30224 // CHECK15-NEXT:    [[CMP179:%.*]] = icmp slt i32 0, [[TMP113]]
30225 // CHECK15-NEXT:    br i1 [[CMP179]], label [[SIMD_IF_THEN180:%.*]], label [[SIMD_IF_END200:%.*]]
30226 // CHECK15:       simd.if.then180:
30227 // CHECK15-NEXT:    [[TMP114:%.*]] = load i32, i32* [[DOTOMP_LB176]], align 4
30228 // CHECK15-NEXT:    store i32 [[TMP114]], i32* [[DOTOMP_IV181]], align 4
30229 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND183:%.*]]
30230 // CHECK15:       omp.inner.for.cond183:
30231 // CHECK15-NEXT:    [[TMP115:%.*]] = load i32, i32* [[DOTOMP_IV181]], align 4, !llvm.access.group !43
30232 // CHECK15-NEXT:    [[TMP116:%.*]] = load i32, i32* [[DOTOMP_UB177]], align 4, !llvm.access.group !43
30233 // CHECK15-NEXT:    [[CMP184:%.*]] = icmp sle i32 [[TMP115]], [[TMP116]]
30234 // CHECK15-NEXT:    br i1 [[CMP184]], label [[OMP_INNER_FOR_BODY185:%.*]], label [[OMP_INNER_FOR_END195:%.*]]
30235 // CHECK15:       omp.inner.for.body185:
30236 // CHECK15-NEXT:    [[TMP117:%.*]] = load i32, i32* [[DOTOMP_IV181]], align 4, !llvm.access.group !43
30237 // CHECK15-NEXT:    [[MUL186:%.*]] = mul nsw i32 [[TMP117]], 1
30238 // CHECK15-NEXT:    [[ADD187:%.*]] = add nsw i32 0, [[MUL186]]
30239 // CHECK15-NEXT:    store i32 [[ADD187]], i32* [[I182]], align 4, !llvm.access.group !43
30240 // CHECK15-NEXT:    [[TMP118:%.*]] = load i32*, i32** [[B]], align 4, !llvm.access.group !43
30241 // CHECK15-NEXT:    [[TMP119:%.*]] = load i32, i32* [[I182]], align 4, !llvm.access.group !43
30242 // CHECK15-NEXT:    [[ARRAYIDX188:%.*]] = getelementptr inbounds i32, i32* [[TMP118]], i32 [[TMP119]]
30243 // CHECK15-NEXT:    [[TMP120:%.*]] = load i32, i32* [[ARRAYIDX188]], align 4, !llvm.access.group !43
30244 // CHECK15-NEXT:    [[TMP121:%.*]] = load i32*, i32** [[C]], align 4, !llvm.access.group !43
30245 // CHECK15-NEXT:    [[TMP122:%.*]] = load i32, i32* [[I182]], align 4, !llvm.access.group !43
30246 // CHECK15-NEXT:    [[ARRAYIDX189:%.*]] = getelementptr inbounds i32, i32* [[TMP121]], i32 [[TMP122]]
30247 // CHECK15-NEXT:    [[TMP123:%.*]] = load i32, i32* [[ARRAYIDX189]], align 4, !llvm.access.group !43
30248 // CHECK15-NEXT:    [[ADD190:%.*]] = add nsw i32 [[TMP120]], [[TMP123]]
30249 // CHECK15-NEXT:    [[TMP124:%.*]] = load i32*, i32** [[A]], align 4, !llvm.access.group !43
30250 // CHECK15-NEXT:    [[TMP125:%.*]] = load i32, i32* [[I182]], align 4, !llvm.access.group !43
30251 // CHECK15-NEXT:    [[ARRAYIDX191:%.*]] = getelementptr inbounds i32, i32* [[TMP124]], i32 [[TMP125]]
30252 // CHECK15-NEXT:    store i32 [[ADD190]], i32* [[ARRAYIDX191]], align 4, !llvm.access.group !43
30253 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE192:%.*]]
30254 // CHECK15:       omp.body.continue192:
30255 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC193:%.*]]
30256 // CHECK15:       omp.inner.for.inc193:
30257 // CHECK15-NEXT:    [[TMP126:%.*]] = load i32, i32* [[DOTOMP_IV181]], align 4, !llvm.access.group !43
30258 // CHECK15-NEXT:    [[ADD194:%.*]] = add nsw i32 [[TMP126]], 1
30259 // CHECK15-NEXT:    store i32 [[ADD194]], i32* [[DOTOMP_IV181]], align 4, !llvm.access.group !43
30260 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND183]], !llvm.loop [[LOOP44:![0-9]+]]
30261 // CHECK15:       omp.inner.for.end195:
30262 // CHECK15-NEXT:    [[TMP127:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_171]], align 4
30263 // CHECK15-NEXT:    [[SUB196:%.*]] = sub nsw i32 [[TMP127]], 0
30264 // CHECK15-NEXT:    [[DIV197:%.*]] = sdiv i32 [[SUB196]], 1
30265 // CHECK15-NEXT:    [[MUL198:%.*]] = mul nsw i32 [[DIV197]], 1
30266 // CHECK15-NEXT:    [[ADD199:%.*]] = add nsw i32 0, [[MUL198]]
30267 // CHECK15-NEXT:    store i32 [[ADD199]], i32* [[I182]], align 4
30268 // CHECK15-NEXT:    br label [[SIMD_IF_END200]]
30269 // CHECK15:       simd.if.end200:
30270 // CHECK15-NEXT:    ret i32 0
30271 //
30272 //
30273 // CHECK16-LABEL: define {{[^@]+}}@main
30274 // CHECK16-SAME: () #[[ATTR0:[0-9]+]] {
30275 // CHECK16-NEXT:  entry:
30276 // CHECK16-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
30277 // CHECK16-NEXT:    [[A:%.*]] = alloca double*, align 4
30278 // CHECK16-NEXT:    [[B:%.*]] = alloca double*, align 4
30279 // CHECK16-NEXT:    [[C:%.*]] = alloca double*, align 4
30280 // CHECK16-NEXT:    [[N:%.*]] = alloca i32, align 4
30281 // CHECK16-NEXT:    [[CH:%.*]] = alloca i32, align 4
30282 // CHECK16-NEXT:    [[TMP:%.*]] = alloca i32, align 4
30283 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
30284 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
30285 // CHECK16-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
30286 // CHECK16-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
30287 // CHECK16-NEXT:    [[I:%.*]] = alloca i32, align 4
30288 // CHECK16-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
30289 // CHECK16-NEXT:    [[I3:%.*]] = alloca i32, align 4
30290 // CHECK16-NEXT:    [[_TMP13:%.*]] = alloca i32, align 4
30291 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_14:%.*]] = alloca i32, align 4
30292 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_15:%.*]] = alloca i32, align 4
30293 // CHECK16-NEXT:    [[DOTOMP_LB19:%.*]] = alloca i32, align 4
30294 // CHECK16-NEXT:    [[DOTOMP_UB20:%.*]] = alloca i32, align 4
30295 // CHECK16-NEXT:    [[I21:%.*]] = alloca i32, align 4
30296 // CHECK16-NEXT:    [[DOTOMP_IV24:%.*]] = alloca i32, align 4
30297 // CHECK16-NEXT:    [[I25:%.*]] = alloca i32, align 4
30298 // CHECK16-NEXT:    [[_TMP44:%.*]] = alloca i32, align 4
30299 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_45:%.*]] = alloca i32, align 4
30300 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_46:%.*]] = alloca i32, align 4
30301 // CHECK16-NEXT:    [[DOTOMP_LB50:%.*]] = alloca i32, align 4
30302 // CHECK16-NEXT:    [[DOTOMP_UB51:%.*]] = alloca i32, align 4
30303 // CHECK16-NEXT:    [[I52:%.*]] = alloca i32, align 4
30304 // CHECK16-NEXT:    [[DOTOMP_IV55:%.*]] = alloca i32, align 4
30305 // CHECK16-NEXT:    [[I56:%.*]] = alloca i32, align 4
30306 // CHECK16-NEXT:    [[_TMP75:%.*]] = alloca i32, align 4
30307 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_76:%.*]] = alloca i32, align 4
30308 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_77:%.*]] = alloca i32, align 4
30309 // CHECK16-NEXT:    [[DOTOMP_LB81:%.*]] = alloca i32, align 4
30310 // CHECK16-NEXT:    [[DOTOMP_UB82:%.*]] = alloca i32, align 4
30311 // CHECK16-NEXT:    [[I83:%.*]] = alloca i32, align 4
30312 // CHECK16-NEXT:    [[DOTOMP_IV86:%.*]] = alloca i32, align 4
30313 // CHECK16-NEXT:    [[I87:%.*]] = alloca i32, align 4
30314 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_106:%.*]] = alloca i32, align 4
30315 // CHECK16-NEXT:    [[_TMP107:%.*]] = alloca i32, align 4
30316 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_108:%.*]] = alloca i32, align 4
30317 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_109:%.*]] = alloca i32, align 4
30318 // CHECK16-NEXT:    [[DOTOMP_LB113:%.*]] = alloca i32, align 4
30319 // CHECK16-NEXT:    [[DOTOMP_UB114:%.*]] = alloca i32, align 4
30320 // CHECK16-NEXT:    [[I115:%.*]] = alloca i32, align 4
30321 // CHECK16-NEXT:    [[DOTOMP_IV118:%.*]] = alloca i32, align 4
30322 // CHECK16-NEXT:    [[I119:%.*]] = alloca i32, align 4
30323 // CHECK16-NEXT:    [[_TMP138:%.*]] = alloca i32, align 4
30324 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_139:%.*]] = alloca i32, align 4
30325 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_140:%.*]] = alloca i32, align 4
30326 // CHECK16-NEXT:    [[DOTOMP_LB144:%.*]] = alloca i32, align 4
30327 // CHECK16-NEXT:    [[DOTOMP_UB145:%.*]] = alloca i32, align 4
30328 // CHECK16-NEXT:    [[I146:%.*]] = alloca i32, align 4
30329 // CHECK16-NEXT:    [[DOTOMP_IV149:%.*]] = alloca i32, align 4
30330 // CHECK16-NEXT:    [[I150:%.*]] = alloca i32, align 4
30331 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_169:%.*]] = alloca i32, align 4
30332 // CHECK16-NEXT:    [[_TMP170:%.*]] = alloca i32, align 4
30333 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_171:%.*]] = alloca i32, align 4
30334 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_172:%.*]] = alloca i32, align 4
30335 // CHECK16-NEXT:    [[DOTOMP_LB176:%.*]] = alloca i32, align 4
30336 // CHECK16-NEXT:    [[DOTOMP_UB177:%.*]] = alloca i32, align 4
30337 // CHECK16-NEXT:    [[I178:%.*]] = alloca i32, align 4
30338 // CHECK16-NEXT:    [[DOTOMP_IV181:%.*]] = alloca i32, align 4
30339 // CHECK16-NEXT:    [[I182:%.*]] = alloca i32, align 4
30340 // CHECK16-NEXT:    store i32 0, i32* [[RETVAL]], align 4
30341 // CHECK16-NEXT:    store i32 10000, i32* [[N]], align 4
30342 // CHECK16-NEXT:    store i32 100, i32* [[CH]], align 4
30343 // CHECK16-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
30344 // CHECK16-NEXT:    store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
30345 // CHECK16-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
30346 // CHECK16-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP1]], 0
30347 // CHECK16-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
30348 // CHECK16-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
30349 // CHECK16-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
30350 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
30351 // CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
30352 // CHECK16-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
30353 // CHECK16-NEXT:    store i32 0, i32* [[I]], align 4
30354 // CHECK16-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
30355 // CHECK16-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
30356 // CHECK16-NEXT:    br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
30357 // CHECK16:       simd.if.then:
30358 // CHECK16-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
30359 // CHECK16-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
30360 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
30361 // CHECK16:       omp.inner.for.cond:
30362 // CHECK16-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
30363 // CHECK16-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !3
30364 // CHECK16-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
30365 // CHECK16-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
30366 // CHECK16:       omp.inner.for.body:
30367 // CHECK16-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
30368 // CHECK16-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
30369 // CHECK16-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
30370 // CHECK16-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !3
30371 // CHECK16-NEXT:    [[TMP8:%.*]] = load double*, double** [[B]], align 4, !llvm.access.group !3
30372 // CHECK16-NEXT:    [[TMP9:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !3
30373 // CHECK16-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP8]], i32 [[TMP9]]
30374 // CHECK16-NEXT:    [[TMP10:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !3
30375 // CHECK16-NEXT:    [[TMP11:%.*]] = load double*, double** [[C]], align 4, !llvm.access.group !3
30376 // CHECK16-NEXT:    [[TMP12:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !3
30377 // CHECK16-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds double, double* [[TMP11]], i32 [[TMP12]]
30378 // CHECK16-NEXT:    [[TMP13:%.*]] = load double, double* [[ARRAYIDX5]], align 4, !llvm.access.group !3
30379 // CHECK16-NEXT:    [[ADD6:%.*]] = fadd double [[TMP10]], [[TMP13]]
30380 // CHECK16-NEXT:    [[TMP14:%.*]] = load double*, double** [[A]], align 4, !llvm.access.group !3
30381 // CHECK16-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !3
30382 // CHECK16-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds double, double* [[TMP14]], i32 [[TMP15]]
30383 // CHECK16-NEXT:    store double [[ADD6]], double* [[ARRAYIDX7]], align 4, !llvm.access.group !3
30384 // CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
30385 // CHECK16:       omp.body.continue:
30386 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
30387 // CHECK16:       omp.inner.for.inc:
30388 // CHECK16-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
30389 // CHECK16-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP16]], 1
30390 // CHECK16-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
30391 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
30392 // CHECK16:       omp.inner.for.end:
30393 // CHECK16-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
30394 // CHECK16-NEXT:    [[SUB9:%.*]] = sub nsw i32 [[TMP17]], 0
30395 // CHECK16-NEXT:    [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
30396 // CHECK16-NEXT:    [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
30397 // CHECK16-NEXT:    [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
30398 // CHECK16-NEXT:    store i32 [[ADD12]], i32* [[I3]], align 4
30399 // CHECK16-NEXT:    br label [[SIMD_IF_END]]
30400 // CHECK16:       simd.if.end:
30401 // CHECK16-NEXT:    [[TMP18:%.*]] = load i32, i32* [[N]], align 4
30402 // CHECK16-NEXT:    store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR_14]], align 4
30403 // CHECK16-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_14]], align 4
30404 // CHECK16-NEXT:    [[SUB16:%.*]] = sub nsw i32 [[TMP19]], 0
30405 // CHECK16-NEXT:    [[DIV17:%.*]] = sdiv i32 [[SUB16]], 1
30406 // CHECK16-NEXT:    [[SUB18:%.*]] = sub nsw i32 [[DIV17]], 1
30407 // CHECK16-NEXT:    store i32 [[SUB18]], i32* [[DOTCAPTURE_EXPR_15]], align 4
30408 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB19]], align 4
30409 // CHECK16-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_15]], align 4
30410 // CHECK16-NEXT:    store i32 [[TMP20]], i32* [[DOTOMP_UB20]], align 4
30411 // CHECK16-NEXT:    store i32 0, i32* [[I21]], align 4
30412 // CHECK16-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_14]], align 4
30413 // CHECK16-NEXT:    [[CMP22:%.*]] = icmp slt i32 0, [[TMP21]]
30414 // CHECK16-NEXT:    br i1 [[CMP22]], label [[SIMD_IF_THEN23:%.*]], label [[SIMD_IF_END43:%.*]]
30415 // CHECK16:       simd.if.then23:
30416 // CHECK16-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_LB19]], align 4
30417 // CHECK16-NEXT:    store i32 [[TMP22]], i32* [[DOTOMP_IV24]], align 4
30418 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND26:%.*]]
30419 // CHECK16:       omp.inner.for.cond26:
30420 // CHECK16-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV24]], align 4, !llvm.access.group !7
30421 // CHECK16-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_UB20]], align 4, !llvm.access.group !7
30422 // CHECK16-NEXT:    [[CMP27:%.*]] = icmp sle i32 [[TMP23]], [[TMP24]]
30423 // CHECK16-NEXT:    br i1 [[CMP27]], label [[OMP_INNER_FOR_BODY28:%.*]], label [[OMP_INNER_FOR_END38:%.*]]
30424 // CHECK16:       omp.inner.for.body28:
30425 // CHECK16-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV24]], align 4, !llvm.access.group !7
30426 // CHECK16-NEXT:    [[MUL29:%.*]] = mul nsw i32 [[TMP25]], 1
30427 // CHECK16-NEXT:    [[ADD30:%.*]] = add nsw i32 0, [[MUL29]]
30428 // CHECK16-NEXT:    store i32 [[ADD30]], i32* [[I25]], align 4, !llvm.access.group !7
30429 // CHECK16-NEXT:    [[TMP26:%.*]] = load double*, double** [[B]], align 4, !llvm.access.group !7
30430 // CHECK16-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I25]], align 4, !llvm.access.group !7
30431 // CHECK16-NEXT:    [[ARRAYIDX31:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
30432 // CHECK16-NEXT:    [[TMP28:%.*]] = load double, double* [[ARRAYIDX31]], align 4, !llvm.access.group !7
30433 // CHECK16-NEXT:    [[TMP29:%.*]] = load double*, double** [[C]], align 4, !llvm.access.group !7
30434 // CHECK16-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I25]], align 4, !llvm.access.group !7
30435 // CHECK16-NEXT:    [[ARRAYIDX32:%.*]] = getelementptr inbounds double, double* [[TMP29]], i32 [[TMP30]]
30436 // CHECK16-NEXT:    [[TMP31:%.*]] = load double, double* [[ARRAYIDX32]], align 4, !llvm.access.group !7
30437 // CHECK16-NEXT:    [[ADD33:%.*]] = fadd double [[TMP28]], [[TMP31]]
30438 // CHECK16-NEXT:    [[TMP32:%.*]] = load double*, double** [[A]], align 4, !llvm.access.group !7
30439 // CHECK16-NEXT:    [[TMP33:%.*]] = load i32, i32* [[I25]], align 4, !llvm.access.group !7
30440 // CHECK16-NEXT:    [[ARRAYIDX34:%.*]] = getelementptr inbounds double, double* [[TMP32]], i32 [[TMP33]]
30441 // CHECK16-NEXT:    store double [[ADD33]], double* [[ARRAYIDX34]], align 4, !llvm.access.group !7
30442 // CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE35:%.*]]
30443 // CHECK16:       omp.body.continue35:
30444 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC36:%.*]]
30445 // CHECK16:       omp.inner.for.inc36:
30446 // CHECK16-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV24]], align 4, !llvm.access.group !7
30447 // CHECK16-NEXT:    [[ADD37:%.*]] = add nsw i32 [[TMP34]], 1
30448 // CHECK16-NEXT:    store i32 [[ADD37]], i32* [[DOTOMP_IV24]], align 4, !llvm.access.group !7
30449 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND26]], !llvm.loop [[LOOP8:![0-9]+]]
30450 // CHECK16:       omp.inner.for.end38:
30451 // CHECK16-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_14]], align 4
30452 // CHECK16-NEXT:    [[SUB39:%.*]] = sub nsw i32 [[TMP35]], 0
30453 // CHECK16-NEXT:    [[DIV40:%.*]] = sdiv i32 [[SUB39]], 1
30454 // CHECK16-NEXT:    [[MUL41:%.*]] = mul nsw i32 [[DIV40]], 1
30455 // CHECK16-NEXT:    [[ADD42:%.*]] = add nsw i32 0, [[MUL41]]
30456 // CHECK16-NEXT:    store i32 [[ADD42]], i32* [[I25]], align 4
30457 // CHECK16-NEXT:    br label [[SIMD_IF_END43]]
30458 // CHECK16:       simd.if.end43:
30459 // CHECK16-NEXT:    [[TMP36:%.*]] = load i32, i32* [[N]], align 4
30460 // CHECK16-NEXT:    store i32 [[TMP36]], i32* [[DOTCAPTURE_EXPR_45]], align 4
30461 // CHECK16-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_45]], align 4
30462 // CHECK16-NEXT:    [[SUB47:%.*]] = sub nsw i32 [[TMP37]], 0
30463 // CHECK16-NEXT:    [[DIV48:%.*]] = sdiv i32 [[SUB47]], 1
30464 // CHECK16-NEXT:    [[SUB49:%.*]] = sub nsw i32 [[DIV48]], 1
30465 // CHECK16-NEXT:    store i32 [[SUB49]], i32* [[DOTCAPTURE_EXPR_46]], align 4
30466 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB50]], align 4
30467 // CHECK16-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_46]], align 4
30468 // CHECK16-NEXT:    store i32 [[TMP38]], i32* [[DOTOMP_UB51]], align 4
30469 // CHECK16-NEXT:    store i32 0, i32* [[I52]], align 4
30470 // CHECK16-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_45]], align 4
30471 // CHECK16-NEXT:    [[CMP53:%.*]] = icmp slt i32 0, [[TMP39]]
30472 // CHECK16-NEXT:    br i1 [[CMP53]], label [[SIMD_IF_THEN54:%.*]], label [[SIMD_IF_END74:%.*]]
30473 // CHECK16:       simd.if.then54:
30474 // CHECK16-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_LB50]], align 4
30475 // CHECK16-NEXT:    store i32 [[TMP40]], i32* [[DOTOMP_IV55]], align 4
30476 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND57:%.*]]
30477 // CHECK16:       omp.inner.for.cond57:
30478 // CHECK16-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_IV55]], align 4, !llvm.access.group !10
30479 // CHECK16-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_UB51]], align 4, !llvm.access.group !10
30480 // CHECK16-NEXT:    [[CMP58:%.*]] = icmp sle i32 [[TMP41]], [[TMP42]]
30481 // CHECK16-NEXT:    br i1 [[CMP58]], label [[OMP_INNER_FOR_BODY59:%.*]], label [[OMP_INNER_FOR_END69:%.*]]
30482 // CHECK16:       omp.inner.for.body59:
30483 // CHECK16-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV55]], align 4, !llvm.access.group !10
30484 // CHECK16-NEXT:    [[MUL60:%.*]] = mul nsw i32 [[TMP43]], 1
30485 // CHECK16-NEXT:    [[ADD61:%.*]] = add nsw i32 0, [[MUL60]]
30486 // CHECK16-NEXT:    store i32 [[ADD61]], i32* [[I56]], align 4, !llvm.access.group !10
30487 // CHECK16-NEXT:    [[TMP44:%.*]] = load double*, double** [[B]], align 4, !llvm.access.group !10
30488 // CHECK16-NEXT:    [[TMP45:%.*]] = load i32, i32* [[I56]], align 4, !llvm.access.group !10
30489 // CHECK16-NEXT:    [[ARRAYIDX62:%.*]] = getelementptr inbounds double, double* [[TMP44]], i32 [[TMP45]]
30490 // CHECK16-NEXT:    [[TMP46:%.*]] = load double, double* [[ARRAYIDX62]], align 4, !llvm.access.group !10
30491 // CHECK16-NEXT:    [[TMP47:%.*]] = load double*, double** [[C]], align 4, !llvm.access.group !10
30492 // CHECK16-NEXT:    [[TMP48:%.*]] = load i32, i32* [[I56]], align 4, !llvm.access.group !10
30493 // CHECK16-NEXT:    [[ARRAYIDX63:%.*]] = getelementptr inbounds double, double* [[TMP47]], i32 [[TMP48]]
30494 // CHECK16-NEXT:    [[TMP49:%.*]] = load double, double* [[ARRAYIDX63]], align 4, !llvm.access.group !10
30495 // CHECK16-NEXT:    [[ADD64:%.*]] = fadd double [[TMP46]], [[TMP49]]
30496 // CHECK16-NEXT:    [[TMP50:%.*]] = load double*, double** [[A]], align 4, !llvm.access.group !10
30497 // CHECK16-NEXT:    [[TMP51:%.*]] = load i32, i32* [[I56]], align 4, !llvm.access.group !10
30498 // CHECK16-NEXT:    [[ARRAYIDX65:%.*]] = getelementptr inbounds double, double* [[TMP50]], i32 [[TMP51]]
30499 // CHECK16-NEXT:    store double [[ADD64]], double* [[ARRAYIDX65]], align 4, !llvm.access.group !10
30500 // CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE66:%.*]]
30501 // CHECK16:       omp.body.continue66:
30502 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC67:%.*]]
30503 // CHECK16:       omp.inner.for.inc67:
30504 // CHECK16-NEXT:    [[TMP52:%.*]] = load i32, i32* [[DOTOMP_IV55]], align 4, !llvm.access.group !10
30505 // CHECK16-NEXT:    [[ADD68:%.*]] = add nsw i32 [[TMP52]], 1
30506 // CHECK16-NEXT:    store i32 [[ADD68]], i32* [[DOTOMP_IV55]], align 4, !llvm.access.group !10
30507 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND57]], !llvm.loop [[LOOP11:![0-9]+]]
30508 // CHECK16:       omp.inner.for.end69:
30509 // CHECK16-NEXT:    [[TMP53:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_45]], align 4
30510 // CHECK16-NEXT:    [[SUB70:%.*]] = sub nsw i32 [[TMP53]], 0
30511 // CHECK16-NEXT:    [[DIV71:%.*]] = sdiv i32 [[SUB70]], 1
30512 // CHECK16-NEXT:    [[MUL72:%.*]] = mul nsw i32 [[DIV71]], 1
30513 // CHECK16-NEXT:    [[ADD73:%.*]] = add nsw i32 0, [[MUL72]]
30514 // CHECK16-NEXT:    store i32 [[ADD73]], i32* [[I56]], align 4
30515 // CHECK16-NEXT:    br label [[SIMD_IF_END74]]
30516 // CHECK16:       simd.if.end74:
30517 // CHECK16-NEXT:    [[TMP54:%.*]] = load i32, i32* [[N]], align 4
30518 // CHECK16-NEXT:    store i32 [[TMP54]], i32* [[DOTCAPTURE_EXPR_76]], align 4
30519 // CHECK16-NEXT:    [[TMP55:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_76]], align 4
30520 // CHECK16-NEXT:    [[SUB78:%.*]] = sub nsw i32 [[TMP55]], 0
30521 // CHECK16-NEXT:    [[DIV79:%.*]] = sdiv i32 [[SUB78]], 1
30522 // CHECK16-NEXT:    [[SUB80:%.*]] = sub nsw i32 [[DIV79]], 1
30523 // CHECK16-NEXT:    store i32 [[SUB80]], i32* [[DOTCAPTURE_EXPR_77]], align 4
30524 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB81]], align 4
30525 // CHECK16-NEXT:    [[TMP56:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_77]], align 4
30526 // CHECK16-NEXT:    store i32 [[TMP56]], i32* [[DOTOMP_UB82]], align 4
30527 // CHECK16-NEXT:    store i32 0, i32* [[I83]], align 4
30528 // CHECK16-NEXT:    [[TMP57:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_76]], align 4
30529 // CHECK16-NEXT:    [[CMP84:%.*]] = icmp slt i32 0, [[TMP57]]
30530 // CHECK16-NEXT:    br i1 [[CMP84]], label [[SIMD_IF_THEN85:%.*]], label [[SIMD_IF_END105:%.*]]
30531 // CHECK16:       simd.if.then85:
30532 // CHECK16-NEXT:    [[TMP58:%.*]] = load i32, i32* [[DOTOMP_LB81]], align 4
30533 // CHECK16-NEXT:    store i32 [[TMP58]], i32* [[DOTOMP_IV86]], align 4
30534 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND88:%.*]]
30535 // CHECK16:       omp.inner.for.cond88:
30536 // CHECK16-NEXT:    [[TMP59:%.*]] = load i32, i32* [[DOTOMP_IV86]], align 4, !llvm.access.group !13
30537 // CHECK16-NEXT:    [[TMP60:%.*]] = load i32, i32* [[DOTOMP_UB82]], align 4, !llvm.access.group !13
30538 // CHECK16-NEXT:    [[CMP89:%.*]] = icmp sle i32 [[TMP59]], [[TMP60]]
30539 // CHECK16-NEXT:    br i1 [[CMP89]], label [[OMP_INNER_FOR_BODY90:%.*]], label [[OMP_INNER_FOR_END100:%.*]]
30540 // CHECK16:       omp.inner.for.body90:
30541 // CHECK16-NEXT:    [[TMP61:%.*]] = load i32, i32* [[DOTOMP_IV86]], align 4, !llvm.access.group !13
30542 // CHECK16-NEXT:    [[MUL91:%.*]] = mul nsw i32 [[TMP61]], 1
30543 // CHECK16-NEXT:    [[ADD92:%.*]] = add nsw i32 0, [[MUL91]]
30544 // CHECK16-NEXT:    store i32 [[ADD92]], i32* [[I87]], align 4, !llvm.access.group !13
30545 // CHECK16-NEXT:    [[TMP62:%.*]] = load double*, double** [[B]], align 4, !llvm.access.group !13
30546 // CHECK16-NEXT:    [[TMP63:%.*]] = load i32, i32* [[I87]], align 4, !llvm.access.group !13
30547 // CHECK16-NEXT:    [[ARRAYIDX93:%.*]] = getelementptr inbounds double, double* [[TMP62]], i32 [[TMP63]]
30548 // CHECK16-NEXT:    [[TMP64:%.*]] = load double, double* [[ARRAYIDX93]], align 4, !llvm.access.group !13
30549 // CHECK16-NEXT:    [[TMP65:%.*]] = load double*, double** [[C]], align 4, !llvm.access.group !13
30550 // CHECK16-NEXT:    [[TMP66:%.*]] = load i32, i32* [[I87]], align 4, !llvm.access.group !13
30551 // CHECK16-NEXT:    [[ARRAYIDX94:%.*]] = getelementptr inbounds double, double* [[TMP65]], i32 [[TMP66]]
30552 // CHECK16-NEXT:    [[TMP67:%.*]] = load double, double* [[ARRAYIDX94]], align 4, !llvm.access.group !13
30553 // CHECK16-NEXT:    [[ADD95:%.*]] = fadd double [[TMP64]], [[TMP67]]
30554 // CHECK16-NEXT:    [[TMP68:%.*]] = load double*, double** [[A]], align 4, !llvm.access.group !13
30555 // CHECK16-NEXT:    [[TMP69:%.*]] = load i32, i32* [[I87]], align 4, !llvm.access.group !13
30556 // CHECK16-NEXT:    [[ARRAYIDX96:%.*]] = getelementptr inbounds double, double* [[TMP68]], i32 [[TMP69]]
30557 // CHECK16-NEXT:    store double [[ADD95]], double* [[ARRAYIDX96]], align 4, !llvm.access.group !13
30558 // CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE97:%.*]]
30559 // CHECK16:       omp.body.continue97:
30560 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC98:%.*]]
30561 // CHECK16:       omp.inner.for.inc98:
30562 // CHECK16-NEXT:    [[TMP70:%.*]] = load i32, i32* [[DOTOMP_IV86]], align 4, !llvm.access.group !13
30563 // CHECK16-NEXT:    [[ADD99:%.*]] = add nsw i32 [[TMP70]], 1
30564 // CHECK16-NEXT:    store i32 [[ADD99]], i32* [[DOTOMP_IV86]], align 4, !llvm.access.group !13
30565 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND88]], !llvm.loop [[LOOP14:![0-9]+]]
30566 // CHECK16:       omp.inner.for.end100:
30567 // CHECK16-NEXT:    [[TMP71:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_76]], align 4
30568 // CHECK16-NEXT:    [[SUB101:%.*]] = sub nsw i32 [[TMP71]], 0
30569 // CHECK16-NEXT:    [[DIV102:%.*]] = sdiv i32 [[SUB101]], 1
30570 // CHECK16-NEXT:    [[MUL103:%.*]] = mul nsw i32 [[DIV102]], 1
30571 // CHECK16-NEXT:    [[ADD104:%.*]] = add nsw i32 0, [[MUL103]]
30572 // CHECK16-NEXT:    store i32 [[ADD104]], i32* [[I87]], align 4
30573 // CHECK16-NEXT:    br label [[SIMD_IF_END105]]
30574 // CHECK16:       simd.if.end105:
30575 // CHECK16-NEXT:    [[TMP72:%.*]] = load i32, i32* [[CH]], align 4
30576 // CHECK16-NEXT:    store i32 [[TMP72]], i32* [[DOTCAPTURE_EXPR_106]], align 4
30577 // CHECK16-NEXT:    [[TMP73:%.*]] = load i32, i32* [[N]], align 4
30578 // CHECK16-NEXT:    store i32 [[TMP73]], i32* [[DOTCAPTURE_EXPR_108]], align 4
30579 // CHECK16-NEXT:    [[TMP74:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_108]], align 4
30580 // CHECK16-NEXT:    [[SUB110:%.*]] = sub nsw i32 [[TMP74]], 0
30581 // CHECK16-NEXT:    [[DIV111:%.*]] = sdiv i32 [[SUB110]], 1
30582 // CHECK16-NEXT:    [[SUB112:%.*]] = sub nsw i32 [[DIV111]], 1
30583 // CHECK16-NEXT:    store i32 [[SUB112]], i32* [[DOTCAPTURE_EXPR_109]], align 4
30584 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB113]], align 4
30585 // CHECK16-NEXT:    [[TMP75:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_109]], align 4
30586 // CHECK16-NEXT:    store i32 [[TMP75]], i32* [[DOTOMP_UB114]], align 4
30587 // CHECK16-NEXT:    store i32 0, i32* [[I115]], align 4
30588 // CHECK16-NEXT:    [[TMP76:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_108]], align 4
30589 // CHECK16-NEXT:    [[CMP116:%.*]] = icmp slt i32 0, [[TMP76]]
30590 // CHECK16-NEXT:    br i1 [[CMP116]], label [[SIMD_IF_THEN117:%.*]], label [[SIMD_IF_END137:%.*]]
30591 // CHECK16:       simd.if.then117:
30592 // CHECK16-NEXT:    [[TMP77:%.*]] = load i32, i32* [[DOTOMP_LB113]], align 4
30593 // CHECK16-NEXT:    store i32 [[TMP77]], i32* [[DOTOMP_IV118]], align 4
30594 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND120:%.*]]
30595 // CHECK16:       omp.inner.for.cond120:
30596 // CHECK16-NEXT:    [[TMP78:%.*]] = load i32, i32* [[DOTOMP_IV118]], align 4, !llvm.access.group !16
30597 // CHECK16-NEXT:    [[TMP79:%.*]] = load i32, i32* [[DOTOMP_UB114]], align 4, !llvm.access.group !16
30598 // CHECK16-NEXT:    [[CMP121:%.*]] = icmp sle i32 [[TMP78]], [[TMP79]]
30599 // CHECK16-NEXT:    br i1 [[CMP121]], label [[OMP_INNER_FOR_BODY122:%.*]], label [[OMP_INNER_FOR_END132:%.*]]
30600 // CHECK16:       omp.inner.for.body122:
30601 // CHECK16-NEXT:    [[TMP80:%.*]] = load i32, i32* [[DOTOMP_IV118]], align 4, !llvm.access.group !16
30602 // CHECK16-NEXT:    [[MUL123:%.*]] = mul nsw i32 [[TMP80]], 1
30603 // CHECK16-NEXT:    [[ADD124:%.*]] = add nsw i32 0, [[MUL123]]
30604 // CHECK16-NEXT:    store i32 [[ADD124]], i32* [[I119]], align 4, !llvm.access.group !16
30605 // CHECK16-NEXT:    [[TMP81:%.*]] = load double*, double** [[B]], align 4, !llvm.access.group !16
30606 // CHECK16-NEXT:    [[TMP82:%.*]] = load i32, i32* [[I119]], align 4, !llvm.access.group !16
30607 // CHECK16-NEXT:    [[ARRAYIDX125:%.*]] = getelementptr inbounds double, double* [[TMP81]], i32 [[TMP82]]
30608 // CHECK16-NEXT:    [[TMP83:%.*]] = load double, double* [[ARRAYIDX125]], align 4, !llvm.access.group !16
30609 // CHECK16-NEXT:    [[TMP84:%.*]] = load double*, double** [[C]], align 4, !llvm.access.group !16
30610 // CHECK16-NEXT:    [[TMP85:%.*]] = load i32, i32* [[I119]], align 4, !llvm.access.group !16
30611 // CHECK16-NEXT:    [[ARRAYIDX126:%.*]] = getelementptr inbounds double, double* [[TMP84]], i32 [[TMP85]]
30612 // CHECK16-NEXT:    [[TMP86:%.*]] = load double, double* [[ARRAYIDX126]], align 4, !llvm.access.group !16
30613 // CHECK16-NEXT:    [[ADD127:%.*]] = fadd double [[TMP83]], [[TMP86]]
30614 // CHECK16-NEXT:    [[TMP87:%.*]] = load double*, double** [[A]], align 4, !llvm.access.group !16
30615 // CHECK16-NEXT:    [[TMP88:%.*]] = load i32, i32* [[I119]], align 4, !llvm.access.group !16
30616 // CHECK16-NEXT:    [[ARRAYIDX128:%.*]] = getelementptr inbounds double, double* [[TMP87]], i32 [[TMP88]]
30617 // CHECK16-NEXT:    store double [[ADD127]], double* [[ARRAYIDX128]], align 4, !llvm.access.group !16
30618 // CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE129:%.*]]
30619 // CHECK16:       omp.body.continue129:
30620 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC130:%.*]]
30621 // CHECK16:       omp.inner.for.inc130:
30622 // CHECK16-NEXT:    [[TMP89:%.*]] = load i32, i32* [[DOTOMP_IV118]], align 4, !llvm.access.group !16
30623 // CHECK16-NEXT:    [[ADD131:%.*]] = add nsw i32 [[TMP89]], 1
30624 // CHECK16-NEXT:    store i32 [[ADD131]], i32* [[DOTOMP_IV118]], align 4, !llvm.access.group !16
30625 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND120]], !llvm.loop [[LOOP17:![0-9]+]]
30626 // CHECK16:       omp.inner.for.end132:
30627 // CHECK16-NEXT:    [[TMP90:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_108]], align 4
30628 // CHECK16-NEXT:    [[SUB133:%.*]] = sub nsw i32 [[TMP90]], 0
30629 // CHECK16-NEXT:    [[DIV134:%.*]] = sdiv i32 [[SUB133]], 1
30630 // CHECK16-NEXT:    [[MUL135:%.*]] = mul nsw i32 [[DIV134]], 1
30631 // CHECK16-NEXT:    [[ADD136:%.*]] = add nsw i32 0, [[MUL135]]
30632 // CHECK16-NEXT:    store i32 [[ADD136]], i32* [[I119]], align 4
30633 // CHECK16-NEXT:    br label [[SIMD_IF_END137]]
30634 // CHECK16:       simd.if.end137:
30635 // CHECK16-NEXT:    [[TMP91:%.*]] = load i32, i32* [[N]], align 4
30636 // CHECK16-NEXT:    store i32 [[TMP91]], i32* [[DOTCAPTURE_EXPR_139]], align 4
30637 // CHECK16-NEXT:    [[TMP92:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_139]], align 4
30638 // CHECK16-NEXT:    [[SUB141:%.*]] = sub nsw i32 [[TMP92]], 0
30639 // CHECK16-NEXT:    [[DIV142:%.*]] = sdiv i32 [[SUB141]], 1
30640 // CHECK16-NEXT:    [[SUB143:%.*]] = sub nsw i32 [[DIV142]], 1
30641 // CHECK16-NEXT:    store i32 [[SUB143]], i32* [[DOTCAPTURE_EXPR_140]], align 4
30642 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB144]], align 4
30643 // CHECK16-NEXT:    [[TMP93:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_140]], align 4
30644 // CHECK16-NEXT:    store i32 [[TMP93]], i32* [[DOTOMP_UB145]], align 4
30645 // CHECK16-NEXT:    store i32 0, i32* [[I146]], align 4
30646 // CHECK16-NEXT:    [[TMP94:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_139]], align 4
30647 // CHECK16-NEXT:    [[CMP147:%.*]] = icmp slt i32 0, [[TMP94]]
30648 // CHECK16-NEXT:    br i1 [[CMP147]], label [[SIMD_IF_THEN148:%.*]], label [[SIMD_IF_END168:%.*]]
30649 // CHECK16:       simd.if.then148:
30650 // CHECK16-NEXT:    [[TMP95:%.*]] = load i32, i32* [[DOTOMP_LB144]], align 4
30651 // CHECK16-NEXT:    store i32 [[TMP95]], i32* [[DOTOMP_IV149]], align 4
30652 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND151:%.*]]
30653 // CHECK16:       omp.inner.for.cond151:
30654 // CHECK16-NEXT:    [[TMP96:%.*]] = load i32, i32* [[DOTOMP_IV149]], align 4, !llvm.access.group !19
30655 // CHECK16-NEXT:    [[TMP97:%.*]] = load i32, i32* [[DOTOMP_UB145]], align 4, !llvm.access.group !19
30656 // CHECK16-NEXT:    [[CMP152:%.*]] = icmp sle i32 [[TMP96]], [[TMP97]]
30657 // CHECK16-NEXT:    br i1 [[CMP152]], label [[OMP_INNER_FOR_BODY153:%.*]], label [[OMP_INNER_FOR_END163:%.*]]
30658 // CHECK16:       omp.inner.for.body153:
30659 // CHECK16-NEXT:    [[TMP98:%.*]] = load i32, i32* [[DOTOMP_IV149]], align 4, !llvm.access.group !19
30660 // CHECK16-NEXT:    [[MUL154:%.*]] = mul nsw i32 [[TMP98]], 1
30661 // CHECK16-NEXT:    [[ADD155:%.*]] = add nsw i32 0, [[MUL154]]
30662 // CHECK16-NEXT:    store i32 [[ADD155]], i32* [[I150]], align 4, !llvm.access.group !19
30663 // CHECK16-NEXT:    [[TMP99:%.*]] = load double*, double** [[B]], align 4, !llvm.access.group !19
30664 // CHECK16-NEXT:    [[TMP100:%.*]] = load i32, i32* [[I150]], align 4, !llvm.access.group !19
30665 // CHECK16-NEXT:    [[ARRAYIDX156:%.*]] = getelementptr inbounds double, double* [[TMP99]], i32 [[TMP100]]
30666 // CHECK16-NEXT:    [[TMP101:%.*]] = load double, double* [[ARRAYIDX156]], align 4, !llvm.access.group !19
30667 // CHECK16-NEXT:    [[TMP102:%.*]] = load double*, double** [[C]], align 4, !llvm.access.group !19
30668 // CHECK16-NEXT:    [[TMP103:%.*]] = load i32, i32* [[I150]], align 4, !llvm.access.group !19
30669 // CHECK16-NEXT:    [[ARRAYIDX157:%.*]] = getelementptr inbounds double, double* [[TMP102]], i32 [[TMP103]]
30670 // CHECK16-NEXT:    [[TMP104:%.*]] = load double, double* [[ARRAYIDX157]], align 4, !llvm.access.group !19
30671 // CHECK16-NEXT:    [[ADD158:%.*]] = fadd double [[TMP101]], [[TMP104]]
30672 // CHECK16-NEXT:    [[TMP105:%.*]] = load double*, double** [[A]], align 4, !llvm.access.group !19
30673 // CHECK16-NEXT:    [[TMP106:%.*]] = load i32, i32* [[I150]], align 4, !llvm.access.group !19
30674 // CHECK16-NEXT:    [[ARRAYIDX159:%.*]] = getelementptr inbounds double, double* [[TMP105]], i32 [[TMP106]]
30675 // CHECK16-NEXT:    store double [[ADD158]], double* [[ARRAYIDX159]], align 4, !llvm.access.group !19
30676 // CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE160:%.*]]
30677 // CHECK16:       omp.body.continue160:
30678 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC161:%.*]]
30679 // CHECK16:       omp.inner.for.inc161:
30680 // CHECK16-NEXT:    [[TMP107:%.*]] = load i32, i32* [[DOTOMP_IV149]], align 4, !llvm.access.group !19
30681 // CHECK16-NEXT:    [[ADD162:%.*]] = add nsw i32 [[TMP107]], 1
30682 // CHECK16-NEXT:    store i32 [[ADD162]], i32* [[DOTOMP_IV149]], align 4, !llvm.access.group !19
30683 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND151]], !llvm.loop [[LOOP20:![0-9]+]]
30684 // CHECK16:       omp.inner.for.end163:
30685 // CHECK16-NEXT:    [[TMP108:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_139]], align 4
30686 // CHECK16-NEXT:    [[SUB164:%.*]] = sub nsw i32 [[TMP108]], 0
30687 // CHECK16-NEXT:    [[DIV165:%.*]] = sdiv i32 [[SUB164]], 1
30688 // CHECK16-NEXT:    [[MUL166:%.*]] = mul nsw i32 [[DIV165]], 1
30689 // CHECK16-NEXT:    [[ADD167:%.*]] = add nsw i32 0, [[MUL166]]
30690 // CHECK16-NEXT:    store i32 [[ADD167]], i32* [[I150]], align 4
30691 // CHECK16-NEXT:    br label [[SIMD_IF_END168]]
30692 // CHECK16:       simd.if.end168:
30693 // CHECK16-NEXT:    [[TMP109:%.*]] = load i32, i32* [[CH]], align 4
30694 // CHECK16-NEXT:    store i32 [[TMP109]], i32* [[DOTCAPTURE_EXPR_169]], align 4
30695 // CHECK16-NEXT:    [[TMP110:%.*]] = load i32, i32* [[N]], align 4
30696 // CHECK16-NEXT:    store i32 [[TMP110]], i32* [[DOTCAPTURE_EXPR_171]], align 4
30697 // CHECK16-NEXT:    [[TMP111:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_171]], align 4
30698 // CHECK16-NEXT:    [[SUB173:%.*]] = sub nsw i32 [[TMP111]], 0
30699 // CHECK16-NEXT:    [[DIV174:%.*]] = sdiv i32 [[SUB173]], 1
30700 // CHECK16-NEXT:    [[SUB175:%.*]] = sub nsw i32 [[DIV174]], 1
30701 // CHECK16-NEXT:    store i32 [[SUB175]], i32* [[DOTCAPTURE_EXPR_172]], align 4
30702 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB176]], align 4
30703 // CHECK16-NEXT:    [[TMP112:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_172]], align 4
30704 // CHECK16-NEXT:    store i32 [[TMP112]], i32* [[DOTOMP_UB177]], align 4
30705 // CHECK16-NEXT:    store i32 0, i32* [[I178]], align 4
30706 // CHECK16-NEXT:    [[TMP113:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_171]], align 4
30707 // CHECK16-NEXT:    [[CMP179:%.*]] = icmp slt i32 0, [[TMP113]]
30708 // CHECK16-NEXT:    br i1 [[CMP179]], label [[SIMD_IF_THEN180:%.*]], label [[SIMD_IF_END200:%.*]]
30709 // CHECK16:       simd.if.then180:
30710 // CHECK16-NEXT:    [[TMP114:%.*]] = load i32, i32* [[DOTOMP_LB176]], align 4
30711 // CHECK16-NEXT:    store i32 [[TMP114]], i32* [[DOTOMP_IV181]], align 4
30712 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND183:%.*]]
30713 // CHECK16:       omp.inner.for.cond183:
30714 // CHECK16-NEXT:    [[TMP115:%.*]] = load i32, i32* [[DOTOMP_IV181]], align 4, !llvm.access.group !22
30715 // CHECK16-NEXT:    [[TMP116:%.*]] = load i32, i32* [[DOTOMP_UB177]], align 4, !llvm.access.group !22
30716 // CHECK16-NEXT:    [[CMP184:%.*]] = icmp sle i32 [[TMP115]], [[TMP116]]
30717 // CHECK16-NEXT:    br i1 [[CMP184]], label [[OMP_INNER_FOR_BODY185:%.*]], label [[OMP_INNER_FOR_END195:%.*]]
30718 // CHECK16:       omp.inner.for.body185:
30719 // CHECK16-NEXT:    [[TMP117:%.*]] = load i32, i32* [[DOTOMP_IV181]], align 4, !llvm.access.group !22
30720 // CHECK16-NEXT:    [[MUL186:%.*]] = mul nsw i32 [[TMP117]], 1
30721 // CHECK16-NEXT:    [[ADD187:%.*]] = add nsw i32 0, [[MUL186]]
30722 // CHECK16-NEXT:    store i32 [[ADD187]], i32* [[I182]], align 4, !llvm.access.group !22
30723 // CHECK16-NEXT:    [[TMP118:%.*]] = load double*, double** [[B]], align 4, !llvm.access.group !22
30724 // CHECK16-NEXT:    [[TMP119:%.*]] = load i32, i32* [[I182]], align 4, !llvm.access.group !22
30725 // CHECK16-NEXT:    [[ARRAYIDX188:%.*]] = getelementptr inbounds double, double* [[TMP118]], i32 [[TMP119]]
30726 // CHECK16-NEXT:    [[TMP120:%.*]] = load double, double* [[ARRAYIDX188]], align 4, !llvm.access.group !22
30727 // CHECK16-NEXT:    [[TMP121:%.*]] = load double*, double** [[C]], align 4, !llvm.access.group !22
30728 // CHECK16-NEXT:    [[TMP122:%.*]] = load i32, i32* [[I182]], align 4, !llvm.access.group !22
30729 // CHECK16-NEXT:    [[ARRAYIDX189:%.*]] = getelementptr inbounds double, double* [[TMP121]], i32 [[TMP122]]
30730 // CHECK16-NEXT:    [[TMP123:%.*]] = load double, double* [[ARRAYIDX189]], align 4, !llvm.access.group !22
30731 // CHECK16-NEXT:    [[ADD190:%.*]] = fadd double [[TMP120]], [[TMP123]]
30732 // CHECK16-NEXT:    [[TMP124:%.*]] = load double*, double** [[A]], align 4, !llvm.access.group !22
30733 // CHECK16-NEXT:    [[TMP125:%.*]] = load i32, i32* [[I182]], align 4, !llvm.access.group !22
30734 // CHECK16-NEXT:    [[ARRAYIDX191:%.*]] = getelementptr inbounds double, double* [[TMP124]], i32 [[TMP125]]
30735 // CHECK16-NEXT:    store double [[ADD190]], double* [[ARRAYIDX191]], align 4, !llvm.access.group !22
30736 // CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE192:%.*]]
30737 // CHECK16:       omp.body.continue192:
30738 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC193:%.*]]
30739 // CHECK16:       omp.inner.for.inc193:
30740 // CHECK16-NEXT:    [[TMP126:%.*]] = load i32, i32* [[DOTOMP_IV181]], align 4, !llvm.access.group !22
30741 // CHECK16-NEXT:    [[ADD194:%.*]] = add nsw i32 [[TMP126]], 1
30742 // CHECK16-NEXT:    store i32 [[ADD194]], i32* [[DOTOMP_IV181]], align 4, !llvm.access.group !22
30743 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND183]], !llvm.loop [[LOOP23:![0-9]+]]
30744 // CHECK16:       omp.inner.for.end195:
30745 // CHECK16-NEXT:    [[TMP127:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_171]], align 4
30746 // CHECK16-NEXT:    [[SUB196:%.*]] = sub nsw i32 [[TMP127]], 0
30747 // CHECK16-NEXT:    [[DIV197:%.*]] = sdiv i32 [[SUB196]], 1
30748 // CHECK16-NEXT:    [[MUL198:%.*]] = mul nsw i32 [[DIV197]], 1
30749 // CHECK16-NEXT:    [[ADD199:%.*]] = add nsw i32 0, [[MUL198]]
30750 // CHECK16-NEXT:    store i32 [[ADD199]], i32* [[I182]], align 4
30751 // CHECK16-NEXT:    br label [[SIMD_IF_END200]]
30752 // CHECK16:       simd.if.end200:
30753 // CHECK16-NEXT:    [[CALL:%.*]] = call i32 @_Z5tmainIiET_v()
30754 // CHECK16-NEXT:    ret i32 [[CALL]]
30755 //
30756 //
30757 // CHECK16-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
30758 // CHECK16-SAME: () #[[ATTR1:[0-9]+]] comdat {
30759 // CHECK16-NEXT:  entry:
30760 // CHECK16-NEXT:    [[A:%.*]] = alloca i32*, align 4
30761 // CHECK16-NEXT:    [[B:%.*]] = alloca i32*, align 4
30762 // CHECK16-NEXT:    [[C:%.*]] = alloca i32*, align 4
30763 // CHECK16-NEXT:    [[N:%.*]] = alloca i32, align 4
30764 // CHECK16-NEXT:    [[CH:%.*]] = alloca i32, align 4
30765 // CHECK16-NEXT:    [[TMP:%.*]] = alloca i32, align 4
30766 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
30767 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
30768 // CHECK16-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
30769 // CHECK16-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
30770 // CHECK16-NEXT:    [[I:%.*]] = alloca i32, align 4
30771 // CHECK16-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
30772 // CHECK16-NEXT:    [[I3:%.*]] = alloca i32, align 4
30773 // CHECK16-NEXT:    [[_TMP13:%.*]] = alloca i32, align 4
30774 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_14:%.*]] = alloca i32, align 4
30775 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_15:%.*]] = alloca i32, align 4
30776 // CHECK16-NEXT:    [[DOTOMP_LB19:%.*]] = alloca i32, align 4
30777 // CHECK16-NEXT:    [[DOTOMP_UB20:%.*]] = alloca i32, align 4
30778 // CHECK16-NEXT:    [[I21:%.*]] = alloca i32, align 4
30779 // CHECK16-NEXT:    [[DOTOMP_IV24:%.*]] = alloca i32, align 4
30780 // CHECK16-NEXT:    [[I25:%.*]] = alloca i32, align 4
30781 // CHECK16-NEXT:    [[_TMP44:%.*]] = alloca i32, align 4
30782 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_45:%.*]] = alloca i32, align 4
30783 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_46:%.*]] = alloca i32, align 4
30784 // CHECK16-NEXT:    [[DOTOMP_LB50:%.*]] = alloca i32, align 4
30785 // CHECK16-NEXT:    [[DOTOMP_UB51:%.*]] = alloca i32, align 4
30786 // CHECK16-NEXT:    [[I52:%.*]] = alloca i32, align 4
30787 // CHECK16-NEXT:    [[DOTOMP_IV55:%.*]] = alloca i32, align 4
30788 // CHECK16-NEXT:    [[I56:%.*]] = alloca i32, align 4
30789 // CHECK16-NEXT:    [[_TMP75:%.*]] = alloca i32, align 4
30790 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_76:%.*]] = alloca i32, align 4
30791 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_77:%.*]] = alloca i32, align 4
30792 // CHECK16-NEXT:    [[DOTOMP_LB81:%.*]] = alloca i32, align 4
30793 // CHECK16-NEXT:    [[DOTOMP_UB82:%.*]] = alloca i32, align 4
30794 // CHECK16-NEXT:    [[I83:%.*]] = alloca i32, align 4
30795 // CHECK16-NEXT:    [[DOTOMP_IV86:%.*]] = alloca i32, align 4
30796 // CHECK16-NEXT:    [[I87:%.*]] = alloca i32, align 4
30797 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_106:%.*]] = alloca i32, align 4
30798 // CHECK16-NEXT:    [[_TMP107:%.*]] = alloca i32, align 4
30799 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_108:%.*]] = alloca i32, align 4
30800 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_109:%.*]] = alloca i32, align 4
30801 // CHECK16-NEXT:    [[DOTOMP_LB113:%.*]] = alloca i32, align 4
30802 // CHECK16-NEXT:    [[DOTOMP_UB114:%.*]] = alloca i32, align 4
30803 // CHECK16-NEXT:    [[I115:%.*]] = alloca i32, align 4
30804 // CHECK16-NEXT:    [[DOTOMP_IV118:%.*]] = alloca i32, align 4
30805 // CHECK16-NEXT:    [[I119:%.*]] = alloca i32, align 4
30806 // CHECK16-NEXT:    [[_TMP138:%.*]] = alloca i32, align 4
30807 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_139:%.*]] = alloca i32, align 4
30808 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_140:%.*]] = alloca i32, align 4
30809 // CHECK16-NEXT:    [[DOTOMP_LB144:%.*]] = alloca i32, align 4
30810 // CHECK16-NEXT:    [[DOTOMP_UB145:%.*]] = alloca i32, align 4
30811 // CHECK16-NEXT:    [[I146:%.*]] = alloca i32, align 4
30812 // CHECK16-NEXT:    [[DOTOMP_IV149:%.*]] = alloca i32, align 4
30813 // CHECK16-NEXT:    [[I150:%.*]] = alloca i32, align 4
30814 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_169:%.*]] = alloca i32, align 4
30815 // CHECK16-NEXT:    [[_TMP170:%.*]] = alloca i32, align 4
30816 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_171:%.*]] = alloca i32, align 4
30817 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_172:%.*]] = alloca i32, align 4
30818 // CHECK16-NEXT:    [[DOTOMP_LB176:%.*]] = alloca i32, align 4
30819 // CHECK16-NEXT:    [[DOTOMP_UB177:%.*]] = alloca i32, align 4
30820 // CHECK16-NEXT:    [[I178:%.*]] = alloca i32, align 4
30821 // CHECK16-NEXT:    [[DOTOMP_IV181:%.*]] = alloca i32, align 4
30822 // CHECK16-NEXT:    [[I182:%.*]] = alloca i32, align 4
30823 // CHECK16-NEXT:    store i32 10000, i32* [[N]], align 4
30824 // CHECK16-NEXT:    store i32 100, i32* [[CH]], align 4
30825 // CHECK16-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
30826 // CHECK16-NEXT:    store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
30827 // CHECK16-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
30828 // CHECK16-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP1]], 0
30829 // CHECK16-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
30830 // CHECK16-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
30831 // CHECK16-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
30832 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
30833 // CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
30834 // CHECK16-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
30835 // CHECK16-NEXT:    store i32 0, i32* [[I]], align 4
30836 // CHECK16-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
30837 // CHECK16-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
30838 // CHECK16-NEXT:    br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
30839 // CHECK16:       simd.if.then:
30840 // CHECK16-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
30841 // CHECK16-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
30842 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
30843 // CHECK16:       omp.inner.for.cond:
30844 // CHECK16-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
30845 // CHECK16-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !25
30846 // CHECK16-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
30847 // CHECK16-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
30848 // CHECK16:       omp.inner.for.body:
30849 // CHECK16-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
30850 // CHECK16-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
30851 // CHECK16-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
30852 // CHECK16-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !25
30853 // CHECK16-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[B]], align 4, !llvm.access.group !25
30854 // CHECK16-NEXT:    [[TMP9:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !25
30855 // CHECK16-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP8]], i32 [[TMP9]]
30856 // CHECK16-NEXT:    [[TMP10:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !25
30857 // CHECK16-NEXT:    [[TMP11:%.*]] = load i32*, i32** [[C]], align 4, !llvm.access.group !25
30858 // CHECK16-NEXT:    [[TMP12:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !25
30859 // CHECK16-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, i32* [[TMP11]], i32 [[TMP12]]
30860 // CHECK16-NEXT:    [[TMP13:%.*]] = load i32, i32* [[ARRAYIDX5]], align 4, !llvm.access.group !25
30861 // CHECK16-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP10]], [[TMP13]]
30862 // CHECK16-NEXT:    [[TMP14:%.*]] = load i32*, i32** [[A]], align 4, !llvm.access.group !25
30863 // CHECK16-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !25
30864 // CHECK16-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i32, i32* [[TMP14]], i32 [[TMP15]]
30865 // CHECK16-NEXT:    store i32 [[ADD6]], i32* [[ARRAYIDX7]], align 4, !llvm.access.group !25
30866 // CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
30867 // CHECK16:       omp.body.continue:
30868 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
30869 // CHECK16:       omp.inner.for.inc:
30870 // CHECK16-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
30871 // CHECK16-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP16]], 1
30872 // CHECK16-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
30873 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
30874 // CHECK16:       omp.inner.for.end:
30875 // CHECK16-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
30876 // CHECK16-NEXT:    [[SUB9:%.*]] = sub nsw i32 [[TMP17]], 0
30877 // CHECK16-NEXT:    [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
30878 // CHECK16-NEXT:    [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
30879 // CHECK16-NEXT:    [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
30880 // CHECK16-NEXT:    store i32 [[ADD12]], i32* [[I3]], align 4
30881 // CHECK16-NEXT:    br label [[SIMD_IF_END]]
30882 // CHECK16:       simd.if.end:
30883 // CHECK16-NEXT:    [[TMP18:%.*]] = load i32, i32* [[N]], align 4
30884 // CHECK16-NEXT:    store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR_14]], align 4
30885 // CHECK16-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_14]], align 4
30886 // CHECK16-NEXT:    [[SUB16:%.*]] = sub nsw i32 [[TMP19]], 0
30887 // CHECK16-NEXT:    [[DIV17:%.*]] = sdiv i32 [[SUB16]], 1
30888 // CHECK16-NEXT:    [[SUB18:%.*]] = sub nsw i32 [[DIV17]], 1
30889 // CHECK16-NEXT:    store i32 [[SUB18]], i32* [[DOTCAPTURE_EXPR_15]], align 4
30890 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB19]], align 4
30891 // CHECK16-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_15]], align 4
30892 // CHECK16-NEXT:    store i32 [[TMP20]], i32* [[DOTOMP_UB20]], align 4
30893 // CHECK16-NEXT:    store i32 0, i32* [[I21]], align 4
30894 // CHECK16-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_14]], align 4
30895 // CHECK16-NEXT:    [[CMP22:%.*]] = icmp slt i32 0, [[TMP21]]
30896 // CHECK16-NEXT:    br i1 [[CMP22]], label [[SIMD_IF_THEN23:%.*]], label [[SIMD_IF_END43:%.*]]
30897 // CHECK16:       simd.if.then23:
30898 // CHECK16-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_LB19]], align 4
30899 // CHECK16-NEXT:    store i32 [[TMP22]], i32* [[DOTOMP_IV24]], align 4
30900 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND26:%.*]]
30901 // CHECK16:       omp.inner.for.cond26:
30902 // CHECK16-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV24]], align 4, !llvm.access.group !28
30903 // CHECK16-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_UB20]], align 4, !llvm.access.group !28
30904 // CHECK16-NEXT:    [[CMP27:%.*]] = icmp sle i32 [[TMP23]], [[TMP24]]
30905 // CHECK16-NEXT:    br i1 [[CMP27]], label [[OMP_INNER_FOR_BODY28:%.*]], label [[OMP_INNER_FOR_END38:%.*]]
30906 // CHECK16:       omp.inner.for.body28:
30907 // CHECK16-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV24]], align 4, !llvm.access.group !28
30908 // CHECK16-NEXT:    [[MUL29:%.*]] = mul nsw i32 [[TMP25]], 1
30909 // CHECK16-NEXT:    [[ADD30:%.*]] = add nsw i32 0, [[MUL29]]
30910 // CHECK16-NEXT:    store i32 [[ADD30]], i32* [[I25]], align 4, !llvm.access.group !28
30911 // CHECK16-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[B]], align 4, !llvm.access.group !28
30912 // CHECK16-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I25]], align 4, !llvm.access.group !28
30913 // CHECK16-NEXT:    [[ARRAYIDX31:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i32 [[TMP27]]
30914 // CHECK16-NEXT:    [[TMP28:%.*]] = load i32, i32* [[ARRAYIDX31]], align 4, !llvm.access.group !28
30915 // CHECK16-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[C]], align 4, !llvm.access.group !28
30916 // CHECK16-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I25]], align 4, !llvm.access.group !28
30917 // CHECK16-NEXT:    [[ARRAYIDX32:%.*]] = getelementptr inbounds i32, i32* [[TMP29]], i32 [[TMP30]]
30918 // CHECK16-NEXT:    [[TMP31:%.*]] = load i32, i32* [[ARRAYIDX32]], align 4, !llvm.access.group !28
30919 // CHECK16-NEXT:    [[ADD33:%.*]] = add nsw i32 [[TMP28]], [[TMP31]]
30920 // CHECK16-NEXT:    [[TMP32:%.*]] = load i32*, i32** [[A]], align 4, !llvm.access.group !28
30921 // CHECK16-NEXT:    [[TMP33:%.*]] = load i32, i32* [[I25]], align 4, !llvm.access.group !28
30922 // CHECK16-NEXT:    [[ARRAYIDX34:%.*]] = getelementptr inbounds i32, i32* [[TMP32]], i32 [[TMP33]]
30923 // CHECK16-NEXT:    store i32 [[ADD33]], i32* [[ARRAYIDX34]], align 4, !llvm.access.group !28
30924 // CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE35:%.*]]
30925 // CHECK16:       omp.body.continue35:
30926 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC36:%.*]]
30927 // CHECK16:       omp.inner.for.inc36:
30928 // CHECK16-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV24]], align 4, !llvm.access.group !28
30929 // CHECK16-NEXT:    [[ADD37:%.*]] = add nsw i32 [[TMP34]], 1
30930 // CHECK16-NEXT:    store i32 [[ADD37]], i32* [[DOTOMP_IV24]], align 4, !llvm.access.group !28
30931 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND26]], !llvm.loop [[LOOP29:![0-9]+]]
30932 // CHECK16:       omp.inner.for.end38:
30933 // CHECK16-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_14]], align 4
30934 // CHECK16-NEXT:    [[SUB39:%.*]] = sub nsw i32 [[TMP35]], 0
30935 // CHECK16-NEXT:    [[DIV40:%.*]] = sdiv i32 [[SUB39]], 1
30936 // CHECK16-NEXT:    [[MUL41:%.*]] = mul nsw i32 [[DIV40]], 1
30937 // CHECK16-NEXT:    [[ADD42:%.*]] = add nsw i32 0, [[MUL41]]
30938 // CHECK16-NEXT:    store i32 [[ADD42]], i32* [[I25]], align 4
30939 // CHECK16-NEXT:    br label [[SIMD_IF_END43]]
30940 // CHECK16:       simd.if.end43:
30941 // CHECK16-NEXT:    [[TMP36:%.*]] = load i32, i32* [[N]], align 4
30942 // CHECK16-NEXT:    store i32 [[TMP36]], i32* [[DOTCAPTURE_EXPR_45]], align 4
30943 // CHECK16-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_45]], align 4
30944 // CHECK16-NEXT:    [[SUB47:%.*]] = sub nsw i32 [[TMP37]], 0
30945 // CHECK16-NEXT:    [[DIV48:%.*]] = sdiv i32 [[SUB47]], 1
30946 // CHECK16-NEXT:    [[SUB49:%.*]] = sub nsw i32 [[DIV48]], 1
30947 // CHECK16-NEXT:    store i32 [[SUB49]], i32* [[DOTCAPTURE_EXPR_46]], align 4
30948 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB50]], align 4
30949 // CHECK16-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_46]], align 4
30950 // CHECK16-NEXT:    store i32 [[TMP38]], i32* [[DOTOMP_UB51]], align 4
30951 // CHECK16-NEXT:    store i32 0, i32* [[I52]], align 4
30952 // CHECK16-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_45]], align 4
30953 // CHECK16-NEXT:    [[CMP53:%.*]] = icmp slt i32 0, [[TMP39]]
30954 // CHECK16-NEXT:    br i1 [[CMP53]], label [[SIMD_IF_THEN54:%.*]], label [[SIMD_IF_END74:%.*]]
30955 // CHECK16:       simd.if.then54:
30956 // CHECK16-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_LB50]], align 4
30957 // CHECK16-NEXT:    store i32 [[TMP40]], i32* [[DOTOMP_IV55]], align 4
30958 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND57:%.*]]
30959 // CHECK16:       omp.inner.for.cond57:
30960 // CHECK16-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_IV55]], align 4, !llvm.access.group !31
30961 // CHECK16-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_UB51]], align 4, !llvm.access.group !31
30962 // CHECK16-NEXT:    [[CMP58:%.*]] = icmp sle i32 [[TMP41]], [[TMP42]]
30963 // CHECK16-NEXT:    br i1 [[CMP58]], label [[OMP_INNER_FOR_BODY59:%.*]], label [[OMP_INNER_FOR_END69:%.*]]
30964 // CHECK16:       omp.inner.for.body59:
30965 // CHECK16-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV55]], align 4, !llvm.access.group !31
30966 // CHECK16-NEXT:    [[MUL60:%.*]] = mul nsw i32 [[TMP43]], 1
30967 // CHECK16-NEXT:    [[ADD61:%.*]] = add nsw i32 0, [[MUL60]]
30968 // CHECK16-NEXT:    store i32 [[ADD61]], i32* [[I56]], align 4, !llvm.access.group !31
30969 // CHECK16-NEXT:    [[TMP44:%.*]] = load i32*, i32** [[B]], align 4, !llvm.access.group !31
30970 // CHECK16-NEXT:    [[TMP45:%.*]] = load i32, i32* [[I56]], align 4, !llvm.access.group !31
30971 // CHECK16-NEXT:    [[ARRAYIDX62:%.*]] = getelementptr inbounds i32, i32* [[TMP44]], i32 [[TMP45]]
30972 // CHECK16-NEXT:    [[TMP46:%.*]] = load i32, i32* [[ARRAYIDX62]], align 4, !llvm.access.group !31
30973 // CHECK16-NEXT:    [[TMP47:%.*]] = load i32*, i32** [[C]], align 4, !llvm.access.group !31
30974 // CHECK16-NEXT:    [[TMP48:%.*]] = load i32, i32* [[I56]], align 4, !llvm.access.group !31
30975 // CHECK16-NEXT:    [[ARRAYIDX63:%.*]] = getelementptr inbounds i32, i32* [[TMP47]], i32 [[TMP48]]
30976 // CHECK16-NEXT:    [[TMP49:%.*]] = load i32, i32* [[ARRAYIDX63]], align 4, !llvm.access.group !31
30977 // CHECK16-NEXT:    [[ADD64:%.*]] = add nsw i32 [[TMP46]], [[TMP49]]
30978 // CHECK16-NEXT:    [[TMP50:%.*]] = load i32*, i32** [[A]], align 4, !llvm.access.group !31
30979 // CHECK16-NEXT:    [[TMP51:%.*]] = load i32, i32* [[I56]], align 4, !llvm.access.group !31
30980 // CHECK16-NEXT:    [[ARRAYIDX65:%.*]] = getelementptr inbounds i32, i32* [[TMP50]], i32 [[TMP51]]
30981 // CHECK16-NEXT:    store i32 [[ADD64]], i32* [[ARRAYIDX65]], align 4, !llvm.access.group !31
30982 // CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE66:%.*]]
30983 // CHECK16:       omp.body.continue66:
30984 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC67:%.*]]
30985 // CHECK16:       omp.inner.for.inc67:
30986 // CHECK16-NEXT:    [[TMP52:%.*]] = load i32, i32* [[DOTOMP_IV55]], align 4, !llvm.access.group !31
30987 // CHECK16-NEXT:    [[ADD68:%.*]] = add nsw i32 [[TMP52]], 1
30988 // CHECK16-NEXT:    store i32 [[ADD68]], i32* [[DOTOMP_IV55]], align 4, !llvm.access.group !31
30989 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND57]], !llvm.loop [[LOOP32:![0-9]+]]
30990 // CHECK16:       omp.inner.for.end69:
30991 // CHECK16-NEXT:    [[TMP53:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_45]], align 4
30992 // CHECK16-NEXT:    [[SUB70:%.*]] = sub nsw i32 [[TMP53]], 0
30993 // CHECK16-NEXT:    [[DIV71:%.*]] = sdiv i32 [[SUB70]], 1
30994 // CHECK16-NEXT:    [[MUL72:%.*]] = mul nsw i32 [[DIV71]], 1
30995 // CHECK16-NEXT:    [[ADD73:%.*]] = add nsw i32 0, [[MUL72]]
30996 // CHECK16-NEXT:    store i32 [[ADD73]], i32* [[I56]], align 4
30997 // CHECK16-NEXT:    br label [[SIMD_IF_END74]]
30998 // CHECK16:       simd.if.end74:
30999 // CHECK16-NEXT:    [[TMP54:%.*]] = load i32, i32* [[N]], align 4
31000 // CHECK16-NEXT:    store i32 [[TMP54]], i32* [[DOTCAPTURE_EXPR_76]], align 4
31001 // CHECK16-NEXT:    [[TMP55:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_76]], align 4
31002 // CHECK16-NEXT:    [[SUB78:%.*]] = sub nsw i32 [[TMP55]], 0
31003 // CHECK16-NEXT:    [[DIV79:%.*]] = sdiv i32 [[SUB78]], 1
31004 // CHECK16-NEXT:    [[SUB80:%.*]] = sub nsw i32 [[DIV79]], 1
31005 // CHECK16-NEXT:    store i32 [[SUB80]], i32* [[DOTCAPTURE_EXPR_77]], align 4
31006 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB81]], align 4
31007 // CHECK16-NEXT:    [[TMP56:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_77]], align 4
31008 // CHECK16-NEXT:    store i32 [[TMP56]], i32* [[DOTOMP_UB82]], align 4
31009 // CHECK16-NEXT:    store i32 0, i32* [[I83]], align 4
31010 // CHECK16-NEXT:    [[TMP57:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_76]], align 4
31011 // CHECK16-NEXT:    [[CMP84:%.*]] = icmp slt i32 0, [[TMP57]]
31012 // CHECK16-NEXT:    br i1 [[CMP84]], label [[SIMD_IF_THEN85:%.*]], label [[SIMD_IF_END105:%.*]]
31013 // CHECK16:       simd.if.then85:
31014 // CHECK16-NEXT:    [[TMP58:%.*]] = load i32, i32* [[DOTOMP_LB81]], align 4
31015 // CHECK16-NEXT:    store i32 [[TMP58]], i32* [[DOTOMP_IV86]], align 4
31016 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND88:%.*]]
31017 // CHECK16:       omp.inner.for.cond88:
31018 // CHECK16-NEXT:    [[TMP59:%.*]] = load i32, i32* [[DOTOMP_IV86]], align 4, !llvm.access.group !34
31019 // CHECK16-NEXT:    [[TMP60:%.*]] = load i32, i32* [[DOTOMP_UB82]], align 4, !llvm.access.group !34
31020 // CHECK16-NEXT:    [[CMP89:%.*]] = icmp sle i32 [[TMP59]], [[TMP60]]
31021 // CHECK16-NEXT:    br i1 [[CMP89]], label [[OMP_INNER_FOR_BODY90:%.*]], label [[OMP_INNER_FOR_END100:%.*]]
31022 // CHECK16:       omp.inner.for.body90:
31023 // CHECK16-NEXT:    [[TMP61:%.*]] = load i32, i32* [[DOTOMP_IV86]], align 4, !llvm.access.group !34
31024 // CHECK16-NEXT:    [[MUL91:%.*]] = mul nsw i32 [[TMP61]], 1
31025 // CHECK16-NEXT:    [[ADD92:%.*]] = add nsw i32 0, [[MUL91]]
31026 // CHECK16-NEXT:    store i32 [[ADD92]], i32* [[I87]], align 4, !llvm.access.group !34
31027 // CHECK16-NEXT:    [[TMP62:%.*]] = load i32*, i32** [[B]], align 4, !llvm.access.group !34
31028 // CHECK16-NEXT:    [[TMP63:%.*]] = load i32, i32* [[I87]], align 4, !llvm.access.group !34
31029 // CHECK16-NEXT:    [[ARRAYIDX93:%.*]] = getelementptr inbounds i32, i32* [[TMP62]], i32 [[TMP63]]
31030 // CHECK16-NEXT:    [[TMP64:%.*]] = load i32, i32* [[ARRAYIDX93]], align 4, !llvm.access.group !34
31031 // CHECK16-NEXT:    [[TMP65:%.*]] = load i32*, i32** [[C]], align 4, !llvm.access.group !34
31032 // CHECK16-NEXT:    [[TMP66:%.*]] = load i32, i32* [[I87]], align 4, !llvm.access.group !34
31033 // CHECK16-NEXT:    [[ARRAYIDX94:%.*]] = getelementptr inbounds i32, i32* [[TMP65]], i32 [[TMP66]]
31034 // CHECK16-NEXT:    [[TMP67:%.*]] = load i32, i32* [[ARRAYIDX94]], align 4, !llvm.access.group !34
31035 // CHECK16-NEXT:    [[ADD95:%.*]] = add nsw i32 [[TMP64]], [[TMP67]]
31036 // CHECK16-NEXT:    [[TMP68:%.*]] = load i32*, i32** [[A]], align 4, !llvm.access.group !34
31037 // CHECK16-NEXT:    [[TMP69:%.*]] = load i32, i32* [[I87]], align 4, !llvm.access.group !34
31038 // CHECK16-NEXT:    [[ARRAYIDX96:%.*]] = getelementptr inbounds i32, i32* [[TMP68]], i32 [[TMP69]]
31039 // CHECK16-NEXT:    store i32 [[ADD95]], i32* [[ARRAYIDX96]], align 4, !llvm.access.group !34
31040 // CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE97:%.*]]
31041 // CHECK16:       omp.body.continue97:
31042 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC98:%.*]]
31043 // CHECK16:       omp.inner.for.inc98:
31044 // CHECK16-NEXT:    [[TMP70:%.*]] = load i32, i32* [[DOTOMP_IV86]], align 4, !llvm.access.group !34
31045 // CHECK16-NEXT:    [[ADD99:%.*]] = add nsw i32 [[TMP70]], 1
31046 // CHECK16-NEXT:    store i32 [[ADD99]], i32* [[DOTOMP_IV86]], align 4, !llvm.access.group !34
31047 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND88]], !llvm.loop [[LOOP35:![0-9]+]]
31048 // CHECK16:       omp.inner.for.end100:
31049 // CHECK16-NEXT:    [[TMP71:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_76]], align 4
31050 // CHECK16-NEXT:    [[SUB101:%.*]] = sub nsw i32 [[TMP71]], 0
31051 // CHECK16-NEXT:    [[DIV102:%.*]] = sdiv i32 [[SUB101]], 1
31052 // CHECK16-NEXT:    [[MUL103:%.*]] = mul nsw i32 [[DIV102]], 1
31053 // CHECK16-NEXT:    [[ADD104:%.*]] = add nsw i32 0, [[MUL103]]
31054 // CHECK16-NEXT:    store i32 [[ADD104]], i32* [[I87]], align 4
31055 // CHECK16-NEXT:    br label [[SIMD_IF_END105]]
31056 // CHECK16:       simd.if.end105:
31057 // CHECK16-NEXT:    [[TMP72:%.*]] = load i32, i32* [[CH]], align 4
31058 // CHECK16-NEXT:    store i32 [[TMP72]], i32* [[DOTCAPTURE_EXPR_106]], align 4
31059 // CHECK16-NEXT:    [[TMP73:%.*]] = load i32, i32* [[N]], align 4
31060 // CHECK16-NEXT:    store i32 [[TMP73]], i32* [[DOTCAPTURE_EXPR_108]], align 4
31061 // CHECK16-NEXT:    [[TMP74:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_108]], align 4
31062 // CHECK16-NEXT:    [[SUB110:%.*]] = sub nsw i32 [[TMP74]], 0
31063 // CHECK16-NEXT:    [[DIV111:%.*]] = sdiv i32 [[SUB110]], 1
31064 // CHECK16-NEXT:    [[SUB112:%.*]] = sub nsw i32 [[DIV111]], 1
31065 // CHECK16-NEXT:    store i32 [[SUB112]], i32* [[DOTCAPTURE_EXPR_109]], align 4
31066 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB113]], align 4
31067 // CHECK16-NEXT:    [[TMP75:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_109]], align 4
31068 // CHECK16-NEXT:    store i32 [[TMP75]], i32* [[DOTOMP_UB114]], align 4
31069 // CHECK16-NEXT:    store i32 0, i32* [[I115]], align 4
31070 // CHECK16-NEXT:    [[TMP76:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_108]], align 4
31071 // CHECK16-NEXT:    [[CMP116:%.*]] = icmp slt i32 0, [[TMP76]]
31072 // CHECK16-NEXT:    br i1 [[CMP116]], label [[SIMD_IF_THEN117:%.*]], label [[SIMD_IF_END137:%.*]]
31073 // CHECK16:       simd.if.then117:
31074 // CHECK16-NEXT:    [[TMP77:%.*]] = load i32, i32* [[DOTOMP_LB113]], align 4
31075 // CHECK16-NEXT:    store i32 [[TMP77]], i32* [[DOTOMP_IV118]], align 4
31076 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND120:%.*]]
31077 // CHECK16:       omp.inner.for.cond120:
31078 // CHECK16-NEXT:    [[TMP78:%.*]] = load i32, i32* [[DOTOMP_IV118]], align 4, !llvm.access.group !37
31079 // CHECK16-NEXT:    [[TMP79:%.*]] = load i32, i32* [[DOTOMP_UB114]], align 4, !llvm.access.group !37
31080 // CHECK16-NEXT:    [[CMP121:%.*]] = icmp sle i32 [[TMP78]], [[TMP79]]
31081 // CHECK16-NEXT:    br i1 [[CMP121]], label [[OMP_INNER_FOR_BODY122:%.*]], label [[OMP_INNER_FOR_END132:%.*]]
31082 // CHECK16:       omp.inner.for.body122:
31083 // CHECK16-NEXT:    [[TMP80:%.*]] = load i32, i32* [[DOTOMP_IV118]], align 4, !llvm.access.group !37
31084 // CHECK16-NEXT:    [[MUL123:%.*]] = mul nsw i32 [[TMP80]], 1
31085 // CHECK16-NEXT:    [[ADD124:%.*]] = add nsw i32 0, [[MUL123]]
31086 // CHECK16-NEXT:    store i32 [[ADD124]], i32* [[I119]], align 4, !llvm.access.group !37
31087 // CHECK16-NEXT:    [[TMP81:%.*]] = load i32*, i32** [[B]], align 4, !llvm.access.group !37
31088 // CHECK16-NEXT:    [[TMP82:%.*]] = load i32, i32* [[I119]], align 4, !llvm.access.group !37
31089 // CHECK16-NEXT:    [[ARRAYIDX125:%.*]] = getelementptr inbounds i32, i32* [[TMP81]], i32 [[TMP82]]
31090 // CHECK16-NEXT:    [[TMP83:%.*]] = load i32, i32* [[ARRAYIDX125]], align 4, !llvm.access.group !37
31091 // CHECK16-NEXT:    [[TMP84:%.*]] = load i32*, i32** [[C]], align 4, !llvm.access.group !37
31092 // CHECK16-NEXT:    [[TMP85:%.*]] = load i32, i32* [[I119]], align 4, !llvm.access.group !37
31093 // CHECK16-NEXT:    [[ARRAYIDX126:%.*]] = getelementptr inbounds i32, i32* [[TMP84]], i32 [[TMP85]]
31094 // CHECK16-NEXT:    [[TMP86:%.*]] = load i32, i32* [[ARRAYIDX126]], align 4, !llvm.access.group !37
31095 // CHECK16-NEXT:    [[ADD127:%.*]] = add nsw i32 [[TMP83]], [[TMP86]]
31096 // CHECK16-NEXT:    [[TMP87:%.*]] = load i32*, i32** [[A]], align 4, !llvm.access.group !37
31097 // CHECK16-NEXT:    [[TMP88:%.*]] = load i32, i32* [[I119]], align 4, !llvm.access.group !37
31098 // CHECK16-NEXT:    [[ARRAYIDX128:%.*]] = getelementptr inbounds i32, i32* [[TMP87]], i32 [[TMP88]]
31099 // CHECK16-NEXT:    store i32 [[ADD127]], i32* [[ARRAYIDX128]], align 4, !llvm.access.group !37
31100 // CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE129:%.*]]
31101 // CHECK16:       omp.body.continue129:
31102 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC130:%.*]]
31103 // CHECK16:       omp.inner.for.inc130:
31104 // CHECK16-NEXT:    [[TMP89:%.*]] = load i32, i32* [[DOTOMP_IV118]], align 4, !llvm.access.group !37
31105 // CHECK16-NEXT:    [[ADD131:%.*]] = add nsw i32 [[TMP89]], 1
31106 // CHECK16-NEXT:    store i32 [[ADD131]], i32* [[DOTOMP_IV118]], align 4, !llvm.access.group !37
31107 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND120]], !llvm.loop [[LOOP38:![0-9]+]]
31108 // CHECK16:       omp.inner.for.end132:
31109 // CHECK16-NEXT:    [[TMP90:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_108]], align 4
31110 // CHECK16-NEXT:    [[SUB133:%.*]] = sub nsw i32 [[TMP90]], 0
31111 // CHECK16-NEXT:    [[DIV134:%.*]] = sdiv i32 [[SUB133]], 1
31112 // CHECK16-NEXT:    [[MUL135:%.*]] = mul nsw i32 [[DIV134]], 1
31113 // CHECK16-NEXT:    [[ADD136:%.*]] = add nsw i32 0, [[MUL135]]
31114 // CHECK16-NEXT:    store i32 [[ADD136]], i32* [[I119]], align 4
31115 // CHECK16-NEXT:    br label [[SIMD_IF_END137]]
31116 // CHECK16:       simd.if.end137:
31117 // CHECK16-NEXT:    [[TMP91:%.*]] = load i32, i32* [[N]], align 4
31118 // CHECK16-NEXT:    store i32 [[TMP91]], i32* [[DOTCAPTURE_EXPR_139]], align 4
31119 // CHECK16-NEXT:    [[TMP92:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_139]], align 4
31120 // CHECK16-NEXT:    [[SUB141:%.*]] = sub nsw i32 [[TMP92]], 0
31121 // CHECK16-NEXT:    [[DIV142:%.*]] = sdiv i32 [[SUB141]], 1
31122 // CHECK16-NEXT:    [[SUB143:%.*]] = sub nsw i32 [[DIV142]], 1
31123 // CHECK16-NEXT:    store i32 [[SUB143]], i32* [[DOTCAPTURE_EXPR_140]], align 4
31124 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB144]], align 4
31125 // CHECK16-NEXT:    [[TMP93:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_140]], align 4
31126 // CHECK16-NEXT:    store i32 [[TMP93]], i32* [[DOTOMP_UB145]], align 4
31127 // CHECK16-NEXT:    store i32 0, i32* [[I146]], align 4
31128 // CHECK16-NEXT:    [[TMP94:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_139]], align 4
31129 // CHECK16-NEXT:    [[CMP147:%.*]] = icmp slt i32 0, [[TMP94]]
31130 // CHECK16-NEXT:    br i1 [[CMP147]], label [[SIMD_IF_THEN148:%.*]], label [[SIMD_IF_END168:%.*]]
31131 // CHECK16:       simd.if.then148:
31132 // CHECK16-NEXT:    [[TMP95:%.*]] = load i32, i32* [[DOTOMP_LB144]], align 4
31133 // CHECK16-NEXT:    store i32 [[TMP95]], i32* [[DOTOMP_IV149]], align 4
31134 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND151:%.*]]
31135 // CHECK16:       omp.inner.for.cond151:
31136 // CHECK16-NEXT:    [[TMP96:%.*]] = load i32, i32* [[DOTOMP_IV149]], align 4, !llvm.access.group !40
31137 // CHECK16-NEXT:    [[TMP97:%.*]] = load i32, i32* [[DOTOMP_UB145]], align 4, !llvm.access.group !40
31138 // CHECK16-NEXT:    [[CMP152:%.*]] = icmp sle i32 [[TMP96]], [[TMP97]]
31139 // CHECK16-NEXT:    br i1 [[CMP152]], label [[OMP_INNER_FOR_BODY153:%.*]], label [[OMP_INNER_FOR_END163:%.*]]
31140 // CHECK16:       omp.inner.for.body153:
31141 // CHECK16-NEXT:    [[TMP98:%.*]] = load i32, i32* [[DOTOMP_IV149]], align 4, !llvm.access.group !40
31142 // CHECK16-NEXT:    [[MUL154:%.*]] = mul nsw i32 [[TMP98]], 1
31143 // CHECK16-NEXT:    [[ADD155:%.*]] = add nsw i32 0, [[MUL154]]
31144 // CHECK16-NEXT:    store i32 [[ADD155]], i32* [[I150]], align 4, !llvm.access.group !40
31145 // CHECK16-NEXT:    [[TMP99:%.*]] = load i32*, i32** [[B]], align 4, !llvm.access.group !40
31146 // CHECK16-NEXT:    [[TMP100:%.*]] = load i32, i32* [[I150]], align 4, !llvm.access.group !40
31147 // CHECK16-NEXT:    [[ARRAYIDX156:%.*]] = getelementptr inbounds i32, i32* [[TMP99]], i32 [[TMP100]]
31148 // CHECK16-NEXT:    [[TMP101:%.*]] = load i32, i32* [[ARRAYIDX156]], align 4, !llvm.access.group !40
31149 // CHECK16-NEXT:    [[TMP102:%.*]] = load i32*, i32** [[C]], align 4, !llvm.access.group !40
31150 // CHECK16-NEXT:    [[TMP103:%.*]] = load i32, i32* [[I150]], align 4, !llvm.access.group !40
31151 // CHECK16-NEXT:    [[ARRAYIDX157:%.*]] = getelementptr inbounds i32, i32* [[TMP102]], i32 [[TMP103]]
31152 // CHECK16-NEXT:    [[TMP104:%.*]] = load i32, i32* [[ARRAYIDX157]], align 4, !llvm.access.group !40
31153 // CHECK16-NEXT:    [[ADD158:%.*]] = add nsw i32 [[TMP101]], [[TMP104]]
31154 // CHECK16-NEXT:    [[TMP105:%.*]] = load i32*, i32** [[A]], align 4, !llvm.access.group !40
31155 // CHECK16-NEXT:    [[TMP106:%.*]] = load i32, i32* [[I150]], align 4, !llvm.access.group !40
31156 // CHECK16-NEXT:    [[ARRAYIDX159:%.*]] = getelementptr inbounds i32, i32* [[TMP105]], i32 [[TMP106]]
31157 // CHECK16-NEXT:    store i32 [[ADD158]], i32* [[ARRAYIDX159]], align 4, !llvm.access.group !40
31158 // CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE160:%.*]]
31159 // CHECK16:       omp.body.continue160:
31160 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC161:%.*]]
31161 // CHECK16:       omp.inner.for.inc161:
31162 // CHECK16-NEXT:    [[TMP107:%.*]] = load i32, i32* [[DOTOMP_IV149]], align 4, !llvm.access.group !40
31163 // CHECK16-NEXT:    [[ADD162:%.*]] = add nsw i32 [[TMP107]], 1
31164 // CHECK16-NEXT:    store i32 [[ADD162]], i32* [[DOTOMP_IV149]], align 4, !llvm.access.group !40
31165 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND151]], !llvm.loop [[LOOP41:![0-9]+]]
31166 // CHECK16:       omp.inner.for.end163:
31167 // CHECK16-NEXT:    [[TMP108:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_139]], align 4
31168 // CHECK16-NEXT:    [[SUB164:%.*]] = sub nsw i32 [[TMP108]], 0
31169 // CHECK16-NEXT:    [[DIV165:%.*]] = sdiv i32 [[SUB164]], 1
31170 // CHECK16-NEXT:    [[MUL166:%.*]] = mul nsw i32 [[DIV165]], 1
31171 // CHECK16-NEXT:    [[ADD167:%.*]] = add nsw i32 0, [[MUL166]]
31172 // CHECK16-NEXT:    store i32 [[ADD167]], i32* [[I150]], align 4
31173 // CHECK16-NEXT:    br label [[SIMD_IF_END168]]
31174 // CHECK16:       simd.if.end168:
31175 // CHECK16-NEXT:    [[TMP109:%.*]] = load i32, i32* [[CH]], align 4
31176 // CHECK16-NEXT:    store i32 [[TMP109]], i32* [[DOTCAPTURE_EXPR_169]], align 4
31177 // CHECK16-NEXT:    [[TMP110:%.*]] = load i32, i32* [[N]], align 4
31178 // CHECK16-NEXT:    store i32 [[TMP110]], i32* [[DOTCAPTURE_EXPR_171]], align 4
31179 // CHECK16-NEXT:    [[TMP111:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_171]], align 4
31180 // CHECK16-NEXT:    [[SUB173:%.*]] = sub nsw i32 [[TMP111]], 0
31181 // CHECK16-NEXT:    [[DIV174:%.*]] = sdiv i32 [[SUB173]], 1
31182 // CHECK16-NEXT:    [[SUB175:%.*]] = sub nsw i32 [[DIV174]], 1
31183 // CHECK16-NEXT:    store i32 [[SUB175]], i32* [[DOTCAPTURE_EXPR_172]], align 4
31184 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB176]], align 4
31185 // CHECK16-NEXT:    [[TMP112:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_172]], align 4
31186 // CHECK16-NEXT:    store i32 [[TMP112]], i32* [[DOTOMP_UB177]], align 4
31187 // CHECK16-NEXT:    store i32 0, i32* [[I178]], align 4
31188 // CHECK16-NEXT:    [[TMP113:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_171]], align 4
31189 // CHECK16-NEXT:    [[CMP179:%.*]] = icmp slt i32 0, [[TMP113]]
31190 // CHECK16-NEXT:    br i1 [[CMP179]], label [[SIMD_IF_THEN180:%.*]], label [[SIMD_IF_END200:%.*]]
31191 // CHECK16:       simd.if.then180:
31192 // CHECK16-NEXT:    [[TMP114:%.*]] = load i32, i32* [[DOTOMP_LB176]], align 4
31193 // CHECK16-NEXT:    store i32 [[TMP114]], i32* [[DOTOMP_IV181]], align 4
31194 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND183:%.*]]
31195 // CHECK16:       omp.inner.for.cond183:
31196 // CHECK16-NEXT:    [[TMP115:%.*]] = load i32, i32* [[DOTOMP_IV181]], align 4, !llvm.access.group !43
31197 // CHECK16-NEXT:    [[TMP116:%.*]] = load i32, i32* [[DOTOMP_UB177]], align 4, !llvm.access.group !43
31198 // CHECK16-NEXT:    [[CMP184:%.*]] = icmp sle i32 [[TMP115]], [[TMP116]]
31199 // CHECK16-NEXT:    br i1 [[CMP184]], label [[OMP_INNER_FOR_BODY185:%.*]], label [[OMP_INNER_FOR_END195:%.*]]
31200 // CHECK16:       omp.inner.for.body185:
31201 // CHECK16-NEXT:    [[TMP117:%.*]] = load i32, i32* [[DOTOMP_IV181]], align 4, !llvm.access.group !43
31202 // CHECK16-NEXT:    [[MUL186:%.*]] = mul nsw i32 [[TMP117]], 1
31203 // CHECK16-NEXT:    [[ADD187:%.*]] = add nsw i32 0, [[MUL186]]
31204 // CHECK16-NEXT:    store i32 [[ADD187]], i32* [[I182]], align 4, !llvm.access.group !43
31205 // CHECK16-NEXT:    [[TMP118:%.*]] = load i32*, i32** [[B]], align 4, !llvm.access.group !43
31206 // CHECK16-NEXT:    [[TMP119:%.*]] = load i32, i32* [[I182]], align 4, !llvm.access.group !43
31207 // CHECK16-NEXT:    [[ARRAYIDX188:%.*]] = getelementptr inbounds i32, i32* [[TMP118]], i32 [[TMP119]]
31208 // CHECK16-NEXT:    [[TMP120:%.*]] = load i32, i32* [[ARRAYIDX188]], align 4, !llvm.access.group !43
31209 // CHECK16-NEXT:    [[TMP121:%.*]] = load i32*, i32** [[C]], align 4, !llvm.access.group !43
31210 // CHECK16-NEXT:    [[TMP122:%.*]] = load i32, i32* [[I182]], align 4, !llvm.access.group !43
31211 // CHECK16-NEXT:    [[ARRAYIDX189:%.*]] = getelementptr inbounds i32, i32* [[TMP121]], i32 [[TMP122]]
31212 // CHECK16-NEXT:    [[TMP123:%.*]] = load i32, i32* [[ARRAYIDX189]], align 4, !llvm.access.group !43
31213 // CHECK16-NEXT:    [[ADD190:%.*]] = add nsw i32 [[TMP120]], [[TMP123]]
31214 // CHECK16-NEXT:    [[TMP124:%.*]] = load i32*, i32** [[A]], align 4, !llvm.access.group !43
31215 // CHECK16-NEXT:    [[TMP125:%.*]] = load i32, i32* [[I182]], align 4, !llvm.access.group !43
31216 // CHECK16-NEXT:    [[ARRAYIDX191:%.*]] = getelementptr inbounds i32, i32* [[TMP124]], i32 [[TMP125]]
31217 // CHECK16-NEXT:    store i32 [[ADD190]], i32* [[ARRAYIDX191]], align 4, !llvm.access.group !43
31218 // CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE192:%.*]]
31219 // CHECK16:       omp.body.continue192:
31220 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC193:%.*]]
31221 // CHECK16:       omp.inner.for.inc193:
31222 // CHECK16-NEXT:    [[TMP126:%.*]] = load i32, i32* [[DOTOMP_IV181]], align 4, !llvm.access.group !43
31223 // CHECK16-NEXT:    [[ADD194:%.*]] = add nsw i32 [[TMP126]], 1
31224 // CHECK16-NEXT:    store i32 [[ADD194]], i32* [[DOTOMP_IV181]], align 4, !llvm.access.group !43
31225 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND183]], !llvm.loop [[LOOP44:![0-9]+]]
31226 // CHECK16:       omp.inner.for.end195:
31227 // CHECK16-NEXT:    [[TMP127:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_171]], align 4
31228 // CHECK16-NEXT:    [[SUB196:%.*]] = sub nsw i32 [[TMP127]], 0
31229 // CHECK16-NEXT:    [[DIV197:%.*]] = sdiv i32 [[SUB196]], 1
31230 // CHECK16-NEXT:    [[MUL198:%.*]] = mul nsw i32 [[DIV197]], 1
31231 // CHECK16-NEXT:    [[ADD199:%.*]] = add nsw i32 0, [[MUL198]]
31232 // CHECK16-NEXT:    store i32 [[ADD199]], i32* [[I182]], align 4
31233 // CHECK16-NEXT:    br label [[SIMD_IF_END200]]
31234 // CHECK16:       simd.if.end200:
31235 // CHECK16-NEXT:    ret i32 0
31236 //
31237