1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
2 // Test host code gen
3 // RUN: %clang_cc1 -no-opaque-pointers -DLAMBDA -verify -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1
4 // RUN: %clang_cc1 -no-opaque-pointers -DLAMBDA -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
5 // RUN: %clang_cc1 -no-opaque-pointers -DLAMBDA -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK1
6 // RUN: %clang_cc1 -no-opaque-pointers -DLAMBDA -verify -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK3
7 // RUN: %clang_cc1 -no-opaque-pointers -DLAMBDA -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
8 // RUN: %clang_cc1 -no-opaque-pointers -DLAMBDA -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK3
9
10 // RUN: %clang_cc1 -no-opaque-pointers -DLAMBDA -verify -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK5
11 // RUN: %clang_cc1 -no-opaque-pointers -DLAMBDA -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
12 // RUN: %clang_cc1 -no-opaque-pointers -DLAMBDA -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK5
13 // RUN: %clang_cc1 -no-opaque-pointers -DLAMBDA -verify -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK7
14 // RUN: %clang_cc1 -no-opaque-pointers -DLAMBDA -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
15 // RUN: %clang_cc1 -no-opaque-pointers -DLAMBDA -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK7
16
17 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK9
18 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
19 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK9
20 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK11
21 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
22 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK11
23
24 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK13
25 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
26 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK13
27 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK15
28 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
29 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK15
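// The RUN lines above form a matrix: -DLAMBDA vs. plain builds, -fopenmp vs.
// -fopenmp-simd, the 64-bit powerpc64le and 32-bit i386 offloading targets,
// and direct compilation vs. compilation through a precompiled header, with
// each combination verified under its own FileCheck prefix (the odd-numbered
// prefixes CHECK1 through CHECK15).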
30 // expected-no-diagnostics
31 #ifndef HEADER
32 #define HEADER
33
34
35 template <typename T>
36 T tmain() {
37 T *a, *b, *c;
38 int n = 10000;
39 int ch = 100;
40
41 // no schedule clauses
42 #pragma omp target
43 #pragma omp teams
44 #pragma omp distribute parallel for simd
45 for (int i = 0; i < n; ++i) {
46 a[i] = b[i] + c[i];
47 }
48
49 // dist_schedule: static no chunk
50 #pragma omp target
51 #pragma omp teams
52 #pragma omp distribute parallel for simd dist_schedule(static)
53 for (int i = 0; i < n; ++i) {
54 a[i] = b[i] + c[i];
55 }
56
57 // dist_schedule: static chunk
58 #pragma omp target
59 #pragma omp teams
60 #pragma omp distribute parallel for simd dist_schedule(static, ch)
61 for (int i = 0; i < n; ++i) {
62 a[i] = b[i] + c[i];
63 }
64
65 // schedule: static no chunk
66 #pragma omp target
67 #pragma omp teams
68 #pragma omp distribute parallel for simd schedule(static)
69 for (int i = 0; i < n; ++i) {
70 a[i] = b[i] + c[i];
71 }
72
73 // schedule: static chunk
74 #pragma omp target
75 #pragma omp teams
76 #pragma omp distribute parallel for simd schedule(static, ch)
77 for (int i = 0; i < n; ++i) {
78 a[i] = b[i] + c[i];
79 }
80
81 // schedule: dynamic no chunk
82 #pragma omp target
83 #pragma omp teams
84 #pragma omp distribute parallel for simd schedule(dynamic)
85 for (int i = 0; i < n; ++i) {
86 a[i] = b[i] + c[i];
87 }
88
89 // schedule: dynamic chunk
90 #pragma omp target
91 #pragma omp teams
92 #pragma omp distribute parallel for simd schedule(dynamic, ch)
93 for (int i = 0; i < n; ++i) {
94 a[i] = b[i] + c[i];
95 }
96
97 return T();
98 }
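// Each of the seven target regions in tmain() above exercises one
// schedule/dist_schedule combination of 'distribute parallel for simd'; the IR
// for each is verified by the autogenerated CHECK lines at the bottom of the
// file. As a rough illustrative sketch (not the emitted IR), the combined
// construct is lowered as two nested worksharing loops:
//
//   // distribute level: each team receives [comb_lb, comb_ub] from the runtime
//   for (iv = comb_lb; iv <= comb_ub; iv += stride) {
//     // 'parallel for' level: the team's threads re-split [prev_lb, prev_ub]
//     for (iv2 = lb; iv2 <= ub; ++iv2) {
//       int i = iv2 * 1 + 0; // simd loop body
//       a[i] = b[i] + c[i];
//     }
//   }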
99
100 int main() {
101 double *a, *b, *c;
102 int n = 10000;
103 int ch = 100;
104
105 #ifdef LAMBDA
106 [&]() {
107
108
109
110
111
112
113
114
115 // no schedule clauses
116 #pragma omp target
117 #pragma omp teams
118
119 #pragma omp distribute parallel for simd
120 for (int i = 0; i < n; ++i) {
121 a[i] = b[i] + c[i];
122
123
124 // check EUB for distribute
125
126 // initialize omp.iv
127
128 // check exit condition
129
130 // check that PrevLB and PrevUB are passed to the 'for'
131 // check that distlb and distub are properly passed to fork_call
132
133 // increment by stride (distInc - 'parallel for' executes the whole chunk) and latch
134
135
136 // implementation of 'parallel for'
137
138
139 // initialize lb and ub to PrevLB and PrevUB
140
141 // PrevEUB is only used when 'for' has a chunked schedule, otherwise EUB is used
142 // In this case we use EUB
143
144 // initialize omp.iv
145
146 // check exit condition
147
148 // check that PrevLB and PrevUB are passed to the 'for'
149
150 // check stride 1 for 'for' in 'distribute parallel for simd'
151
152
153 [&]() {
154 a[i] = b[i] + c[i];
155 }();
156 }
157
158   // dist_schedule: static no chunk (same as default - no dist_schedule)
159 #pragma omp target
160 #pragma omp teams
161
162 #pragma omp distribute parallel for simd dist_schedule(static)
163 for (int i = 0; i < n; ++i) {
164 a[i] = b[i] + c[i];
165
166
167 // check EUB for distribute
168
169 // initialize omp.iv
170
171 // check exit condition
172
173 // check that PrevLB and PrevUB are passed to the 'for'
174 // check that distlb and distub are properly passed to fork_call
175
176 // increment by stride (distInc - 'parallel for' executes the whole chunk) and latch
177
178
179 // implementation of 'parallel for'
180
181
182 // initialize lb and ub to PrevLB and PrevUB
183
184 // PrevEUB is only used when 'for' has a chunked schedule, otherwise EUB is used
185 // In this case we use EUB
186
187 // initialize omp.iv
188
189 // check exit condition
190
191 // check that PrevLB and PrevUB are passed to the 'for'
192
193 // check stride 1 for 'for' in 'distribute parallel for simd'
194
195 [&]() {
196 a[i] = b[i] + c[i];
197 }();
198 }
199
200 // dist_schedule: static chunk
201 #pragma omp target
202 #pragma omp teams
203
204 #pragma omp distribute parallel for simd dist_schedule(static, ch)
205 for (int i = 0; i < n; ++i) {
206 a[i] = b[i] + c[i];
207
208
209 // check EUB for distribute
210
211 // initialize omp.iv
212
213 // check exit condition
214
215 // check that PrevLB and PrevUB are passed to the 'for'
216 // check that distlb and distub are properly passed to fork_call
217
218 // check DistInc
219
220 // Update UB
221
222 // Store LB in IV
223
224
225 // loop exit
226
227     // skip implementation of 'parallel for': it uses default scheduling and was tested above
228 [&]() {
229 a[i] = b[i] + c[i];
230 }();
231 }
232
233 // schedule: static no chunk
234 #pragma omp target
235 #pragma omp teams
236
237 #pragma omp distribute parallel for simd schedule(static)
238 for (int i = 0; i < n; ++i) {
239 a[i] = b[i] + c[i];
240
241 // skip rest of implementation of 'distribute' as it is tested above for default dist_schedule case
242
243     // 'parallel for' implementation is the same as the case without a schedule clause (static no chunk is the default)
244
245
246 // initialize lb and ub to PrevLB and PrevUB
247
248 // PrevEUB is only used when 'for' has a chunked schedule, otherwise EUB is used
249 // In this case we use EUB
250
251 // initialize omp.iv
252
253 // check exit condition
254
255 // check that PrevLB and PrevUB are passed to the 'for'
256
257 // check stride 1 for 'for' in 'distribute parallel for simd'
258
259
260 [&]() {
261 a[i] = b[i] + c[i];
262 }();
263 }
264
265 // schedule: static chunk
266 #pragma omp target
267 #pragma omp teams
268
269 #pragma omp distribute parallel for simd schedule(static, ch)
270 for (int i = 0; i < n; ++i) {
271 a[i] = b[i] + c[i];
272 // skip rest of implementation of 'distribute' as it is tested above for default dist_schedule case
273
274 // 'parallel for' implementation using outer and inner loops and PrevEUB
275
276 // initialize lb and ub to PrevLB and PrevUB
277
278 // check PrevEUB (using PrevUB instead of NumIt as upper bound)
279
280 // initialize omp.iv (IV = LB)
281
282 // outer loop: while (IV < UB) {
283
284
285
286 // skip body branch
287
288 // IV = IV + 1 and inner loop latch
289
290 // check NextLB and NextUB
291
292
293 [&]() {
294 a[i] = b[i] + c[i];
295 }();
296 }
297
298 // schedule: dynamic no chunk
299 #pragma omp target
300 #pragma omp teams
301
302 #pragma omp distribute parallel for simd schedule(dynamic)
303 for (int i = 0; i < n; ++i) {
304 a[i] = b[i] + c[i];
305 // skip rest of implementation of 'distribute' as it is tested above for default dist_schedule case
306
307 // 'parallel for' implementation using outer and inner loops and PrevEUB
308
309 // initialize lb and ub to PrevLB and PrevUB
310
311
312 // initialize omp.iv (IV = LB)
313
314
315 // skip body branch
316
317 // IV = IV + 1 and inner loop latch
318
319 // check NextLB and NextUB
320
321
322 [&]() {
323 a[i] = b[i] + c[i];
324 }();
325 }
326
327 // schedule: dynamic chunk
328 #pragma omp target
329 #pragma omp teams
330
331 #pragma omp distribute parallel for simd schedule(dynamic, ch)
332 for (int i = 0; i < n; ++i) {
333 a[i] = b[i] + c[i];
334 // skip rest of implementation of 'distribute' as it is tested above for default dist_schedule case
335
336 // 'parallel for' implementation using outer and inner loops and PrevEUB
337
338 // initialize lb and ub to PrevLB and PrevUB
339
340
341 // initialize omp.iv (IV = LB)
342
343
344 // skip body branch
345
346 // IV = IV + 1 and inner loop latch
347
348 // check NextLB and NextUB
349
350
351 [&]() {
352 a[i] = b[i] + c[i];
353 }();
354 }
355 }();
356 return 0;
357 #else
358
359
360
361
362
363
364
365
366
367 // no schedule clauses
368 #pragma omp target
369 #pragma omp teams
370
371 #pragma omp distribute parallel for simd
372 for (int i = 0; i < n; ++i) {
373 a[i] = b[i] + c[i];
374
375
376 // check EUB for distribute
377
378 // initialize omp.iv
379
380 // check exit condition
381
382 // check that PrevLB and PrevUB are passed to the 'for'
383 // check that distlb and distub are properly passed to fork_call
384
385 // increment by stride (distInc - 'parallel for' executes the whole chunk) and latch
386
387
388 // implementation of 'parallel for'
389
390
391 // initialize lb and ub to PrevLB and PrevUB
392
393 // PrevEUB is only used when 'for' has a chunked schedule, otherwise EUB is used
394 // In this case we use EUB
395
396 // initialize omp.iv
397
398 // check exit condition
399
400 // check that PrevLB and PrevUB are passed to the 'for'
401
402 // check stride 1 for 'for' in 'distribute parallel for simd'
403
404 }
405
406 // dist_schedule: static no chunk
407 #pragma omp target
408 #pragma omp teams
409
410 #pragma omp distribute parallel for simd dist_schedule(static)
411 for (int i = 0; i < n; ++i) {
412 a[i] = b[i] + c[i];
413
414
415 // check EUB for distribute
416
417 // initialize omp.iv
418
419 // check exit condition
420
421 // check that PrevLB and PrevUB are passed to the 'for'
422 // check that distlb and distub are properly passed to fork_call
423
424 // increment by stride (distInc - 'parallel for' executes the whole chunk) and latch
425
426
427 // implementation of 'parallel for'
428
429
430 // initialize lb and ub to PrevLB and PrevUB
431
432 // PrevEUB is only used when 'for' has a chunked schedule, otherwise EUB is used
433 // In this case we use EUB
434
435 // initialize omp.iv
436
437 // check exit condition
438
439 // check that PrevLB and PrevUB are passed to the 'for'
440
441 // check stride 1 for 'for' in 'distribute parallel for simd'
442
443 }
444
445 // dist_schedule: static chunk
446 #pragma omp target
447 #pragma omp teams
448
449 #pragma omp distribute parallel for simd dist_schedule(static, ch)
450 for (int i = 0; i < n; ++i) {
451 a[i] = b[i] + c[i];
452
453     // unlike the previous tests, in this one we have an outer and an inner loop for 'distribute'
454
455 // check EUB for distribute
456
457 // initialize omp.iv
458
459 // check exit condition
460
461 // check that PrevLB and PrevUB are passed to the 'for'
462 // check that distlb and distub are properly passed to fork_call
463
464 // check DistInc
465
466 // Update UB
467
468 // Store LB in IV
469
470
471 // loop exit
472
473     // skip implementation of 'parallel for': it uses default scheduling and was tested above
474 }
475
476 // schedule: static no chunk
477 #pragma omp target
478 #pragma omp teams
479
480 #pragma omp distribute parallel for simd schedule(static)
481 for (int i = 0; i < n; ++i) {
482 a[i] = b[i] + c[i];
483
484 // skip rest of implementation of 'distribute' as it is tested above for default dist_schedule case
485
486     // 'parallel for' implementation is the same as the case without a schedule clause (static no chunk is the default)
487
488
489 // initialize lb and ub to PrevLB and PrevUB
490
491 // PrevEUB is only used when 'for' has a chunked schedule, otherwise EUB is used
492 // In this case we use EUB
493
494 // initialize omp.iv
495
496 // check exit condition
497
498 // check that PrevLB and PrevUB are passed to the 'for'
499
500 // check stride 1 for 'for' in 'distribute parallel for simd'
501
502 }
503
504 // schedule: static chunk
505 #pragma omp target
506 #pragma omp teams
507
508 #pragma omp distribute parallel for simd schedule(static, ch)
509 for (int i = 0; i < n; ++i) {
510 a[i] = b[i] + c[i];
511 // skip rest of implementation of 'distribute' as it is tested above for default dist_schedule case
512
513 // 'parallel for' implementation using outer and inner loops and PrevEUB
514
515 // initialize lb and ub to PrevLB and PrevUB
516
517 // check PrevEUB (using PrevUB instead of NumIt as upper bound)
518
519 // initialize omp.iv (IV = LB)
520
521 // outer loop: while (IV < UB) {
522
523
524
525 // skip body branch
526
527 // IV = IV + 1 and inner loop latch
528
529 // check NextLB and NextUB
530
531
532 }
533
534 // schedule: dynamic no chunk
535 #pragma omp target
536 #pragma omp teams
537
538 #pragma omp distribute parallel for simd schedule(dynamic)
539 for (int i = 0; i < n; ++i) {
540 a[i] = b[i] + c[i];
541 // skip rest of implementation of 'distribute' as it is tested above for default dist_schedule case
542
543 // 'parallel for' implementation using outer and inner loops and PrevEUB
544
545 // initialize lb and ub to PrevLB and PrevUB
546
547
548 // initialize omp.iv (IV = LB)
549
550
551 // skip body branch
552
553 // IV = IV + 1 and inner loop latch
554
555 // check NextLB and NextUB
556
557
558 }
559
560 // schedule: dynamic chunk
561 #pragma omp target
562 #pragma omp teams
563
564 #pragma omp distribute parallel for simd schedule(dynamic, ch)
565 for (int i = 0; i < n; ++i) {
566 a[i] = b[i] + c[i];
567 // skip rest of implementation of 'distribute' as it is tested above for default dist_schedule case
568
569 // 'parallel for' implementation using outer and inner loops and PrevEUB
570
571 // initialize lb and ub to PrevLB and PrevUB
572
573
574 // initialize omp.iv (IV = LB)
575
576
577 // skip body branch
578
579 // IV = IV + 1 and inner loop latch
580
581 // check NextLB and NextUB
582
583
584 }
585
586 return tmain<int>();
587 #endif
588 }
589
590 // check code
591
592
593
594
595
596
597
598
599
600
601
602 // check EUB for distribute
603
604 // initialize omp.iv
605
606 // check exit condition
607
608 // check that PrevLB and PrevUB are passed to the 'for'
609 // check that distlb and distub are properly passed to fork_call
610
611 // increment by stride (distInc - 'parallel for' executes the whole chunk) and latch
612
613
614 // implementation of 'parallel for'
615
616
617 // initialize lb and ub to PrevLB and PrevUB
618
619 // PrevEUB is only used when 'for' has a chunked schedule, otherwise EUB is used
620 // In this case we use EUB
621
622 // initialize omp.iv
623
624 // check exit condition
625
626 // check that PrevLB and PrevUB are passed to the 'for'
627
628 // check stride 1 for 'for' in 'distribute parallel for simd'
629
630
631
632
633
634 // check EUB for distribute
635
636 // initialize omp.iv
637
638 // check exit condition
639
640 // check that PrevLB and PrevUB are passed to the 'for'
641 // check that distlb and distub are properly passed to fork_call
642
643 // increment by stride (distInc - 'parallel for' executes the whole chunk) and latch
644
645
646 // implementation of 'parallel for'
647
648
649 // initialize lb and ub to PrevLB and PrevUB
650
651 // PrevEUB is only used when 'for' has a chunked schedule, otherwise EUB is used
652 // In this case we use EUB
653
654 // initialize omp.iv
655
656 // check exit condition
657
658 // check that PrevLB and PrevUB are passed to the 'for'
659
660 // check stride 1 for 'for' in 'distribute parallel for simd'
661
662
663
664
665 // unlike the previous tests, in this one we have an outer and an inner loop for 'distribute'
666
667 // check EUB for distribute
668
669 // initialize omp.iv
670
671 // check exit condition
672
673 // check that PrevLB and PrevUB are passed to the 'for'
674 // check that distlb and distub are properly passed to fork_call
675
676 // check DistInc
677
678 // Update UB
679
680 // Store LB in IV
681
682
683 // loop exit
684
685 // skip implementation of 'parallel for': it uses default scheduling and was tested above
686
687
688
689 // skip rest of implementation of 'distribute' as it is tested above for default dist_schedule case
690
691 // 'parallel for' implementation is the same as the case without a schedule clause (static no chunk is the default)
692
693
694 // initialize lb and ub to PrevLB and PrevUB
695
696 // PrevEUB is only used when 'for' has a chunked schedule, otherwise EUB is used
697 // In this case we use EUB
698
699 // initialize omp.iv
700
701 // check exit condition
702
703 // check that PrevLB and PrevUB are passed to the 'for'
704
705 // check stride 1 for 'for' in 'distribute parallel for simd'
706
707
708
709 // skip rest of implementation of 'distribute' as it is tested above for default dist_schedule case
710
711 // 'parallel for' implementation using outer and inner loops and PrevEUB
712
713 // initialize lb and ub to PrevLB and PrevUB
714
715 // check PrevEUB (using PrevUB instead of NumIt as upper bound)
716
717 // initialize omp.iv (IV = LB)
718
719 // outer loop: while (IV < UB) {
720
721
722
723 // skip body branch
724
725 // IV = IV + 1 and inner loop latch
726
727 // check NextLB and NextUB
728
729
730
731
732 // skip rest of implementation of 'distribute' as it is tested above for default dist_schedule case
733
734 // 'parallel for' implementation using outer and inner loops and PrevEUB
735
736 // initialize lb and ub to PrevLB and PrevUB
737
738
739 // initialize omp.iv (IV = LB)
740
741
742 // skip body branch
743
744 // IV = IV + 1 and inner loop latch
745
746 // check NextLB and NextUB
747
748
749
750
751 // skip rest of implementation of 'distribute' as it is tested above for default dist_schedule case
752
753 // 'parallel for' implementation using outer and inner loops and PrevEUB
754
755 // initialize lb and ub to PrevLB and PrevUB
756
757
758 // initialize omp.iv (IV = LB)
759
760
761 // skip body branch
762
763 // IV = IV + 1 and inner loop latch
764
765 // check NextLB and NextUB
766
767
768
769 #endif
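// Everything from here down was autogenerated by utils/update_cc_test_checks.py
// (see the NOTE at the top of the file). CHECK1 corresponds to the 64-bit
// powerpc64le -DLAMBDA -fopenmp run.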
770 // CHECK1-LABEL: define {{[^@]+}}@main
771 // CHECK1-SAME: () #[[ATTR0:[0-9]+]] {
772 // CHECK1-NEXT: entry:
773 // CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
774 // CHECK1-NEXT: [[A:%.*]] = alloca double*, align 8
775 // CHECK1-NEXT: [[B:%.*]] = alloca double*, align 8
776 // CHECK1-NEXT: [[C:%.*]] = alloca double*, align 8
777 // CHECK1-NEXT: [[N:%.*]] = alloca i32, align 4
778 // CHECK1-NEXT: [[CH:%.*]] = alloca i32, align 4
779 // CHECK1-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 8
780 // CHECK1-NEXT: store i32 0, i32* [[RETVAL]], align 4
781 // CHECK1-NEXT: store i32 10000, i32* [[N]], align 4
782 // CHECK1-NEXT: store i32 100, i32* [[CH]], align 4
783 // CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 0
784 // CHECK1-NEXT: store i32* [[N]], i32** [[TMP0]], align 8
785 // CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 1
786 // CHECK1-NEXT: store double** [[A]], double*** [[TMP1]], align 8
787 // CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 2
788 // CHECK1-NEXT: store double** [[B]], double*** [[TMP2]], align 8
789 // CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 3
790 // CHECK1-NEXT: store double** [[C]], double*** [[TMP3]], align 8
791 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 4
792 // CHECK1-NEXT: store i32* [[CH]], i32** [[TMP4]], align 8
793 // CHECK1-NEXT: call void @"_ZZ4mainENK3$_0clEv"(%class.anon* noundef nonnull align 8 dereferenceable(40) [[REF_TMP]])
794 // CHECK1-NEXT: ret i32 0
795 //
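// The @main body above builds the outer lambda's capture record [[CLASS_ANON]]
// (i32* n, double** a, double** b, double** c, i32* ch -- five 8-byte fields,
// matching the dereferenceable(40) on the operator() call) and then invokes
// the lambda.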
796 //
797 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l116
798 // CHECK1-SAME: (i64 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR2:[0-9]+]] {
799 // CHECK1-NEXT: entry:
800 // CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
801 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca double*, align 8
802 // CHECK1-NEXT: [[B_ADDR:%.*]] = alloca double*, align 8
803 // CHECK1-NEXT: [[C_ADDR:%.*]] = alloca double*, align 8
804 // CHECK1-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
805 // CHECK1-NEXT: store double* [[A]], double** [[A_ADDR]], align 8
806 // CHECK1-NEXT: store double* [[B]], double** [[B_ADDR]], align 8
807 // CHECK1-NEXT: store double* [[C]], double** [[C_ADDR]], align 8
808 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
809 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[CONV]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
810 // CHECK1-NEXT: ret void
811 //
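// The target region for line 116 above receives n by value as an i64 (bitcast
// back to an i32* over the parameter slot) together with the three array
// pointers, and forwards all four captures to the teams outlined function via
// __kmpc_fork_teams.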
812 //
813 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
814 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
815 // CHECK1-NEXT: entry:
816 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
817 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
818 // CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
819 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca double**, align 8
820 // CHECK1-NEXT: [[B_ADDR:%.*]] = alloca double**, align 8
821 // CHECK1-NEXT: [[C_ADDR:%.*]] = alloca double**, align 8
822 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
823 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
824 // CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
825 // CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
826 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
827 // CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
828 // CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
829 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
830 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
831 // CHECK1-NEXT: [[I3:%.*]] = alloca i32, align 4
832 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
833 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
834 // CHECK1-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
835 // CHECK1-NEXT: store double** [[A]], double*** [[A_ADDR]], align 8
836 // CHECK1-NEXT: store double** [[B]], double*** [[B_ADDR]], align 8
837 // CHECK1-NEXT: store double** [[C]], double*** [[C_ADDR]], align 8
838 // CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
839 // CHECK1-NEXT: [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
840 // CHECK1-NEXT: [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
841 // CHECK1-NEXT: [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
842 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
843 // CHECK1-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
844 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
845 // CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
846 // CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
847 // CHECK1-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
848 // CHECK1-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
849 // CHECK1-NEXT: store i32 0, i32* [[I]], align 4
850 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
851 // CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
852 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
853 // CHECK1: omp.precond.then:
854 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
855 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
856 // CHECK1-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
857 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
858 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
859 // CHECK1-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
860 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
861 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
862 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
863 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
864 // CHECK1-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
865 // CHECK1-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
866 // CHECK1: cond.true:
867 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
868 // CHECK1-NEXT: br label [[COND_END:%.*]]
869 // CHECK1: cond.false:
870 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
871 // CHECK1-NEXT: br label [[COND_END]]
872 // CHECK1: cond.end:
873 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
874 // CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
875 // CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
876 // CHECK1-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
877 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
878 // CHECK1: omp.inner.for.cond:
879 // CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
880 // CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !10
881 // CHECK1-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
882 // CHECK1-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
883 // CHECK1: omp.inner.for.body:
884 // CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !10
885 // CHECK1-NEXT: [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
886 // CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !10
887 // CHECK1-NEXT: [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
888 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !10
889 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
890 // CHECK1: omp.inner.for.inc:
891 // CHECK1-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
892 // CHECK1-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !10
893 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
894 // CHECK1-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
895 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]]
896 // CHECK1: omp.inner.for.end:
897 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
898 // CHECK1: omp.loop.exit:
899 // CHECK1-NEXT: [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
900 // CHECK1-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
901 // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
902 // CHECK1-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
903 // CHECK1-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
904 // CHECK1-NEXT: br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
905 // CHECK1: .omp.final.then:
906 // CHECK1-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
907 // CHECK1-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
908 // CHECK1-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
909 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
910 // CHECK1-NEXT: [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
911 // CHECK1-NEXT: store i32 [[ADD8]], i32* [[I3]], align 4
912 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
913 // CHECK1: .omp.final.done:
914 // CHECK1-NEXT: br label [[OMP_PRECOND_END]]
915 // CHECK1: omp.precond.end:
916 // CHECK1-NEXT: ret void
917 //
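// @.omp_outlined. above is the teams/distribute level of the default-schedule
// loop: it computes the trip count from n, guards against a zero-trip loop
// (omp.precond.then/omp.precond.end), initializes the combined bounds with
// __kmpc_for_static_init_4 (schedule value 92, used here for the default
// dist_schedule), and in the body zero-extends COMB_LB/COMB_UB to i64 and
// passes them to __kmpc_fork_call as the PrevLB/PrevUB of the nested
// 'parallel for'.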
918 //
919 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1
920 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
921 // CHECK1-NEXT: entry:
922 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
923 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
924 // CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
925 // CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
926 // CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
927 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca double**, align 8
928 // CHECK1-NEXT: [[B_ADDR:%.*]] = alloca double**, align 8
929 // CHECK1-NEXT: [[C_ADDR:%.*]] = alloca double**, align 8
930 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
931 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
932 // CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
933 // CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
934 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
935 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
936 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
937 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
938 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
939 // CHECK1-NEXT: [[I4:%.*]] = alloca i32, align 4
940 // CHECK1-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8
941 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
942 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
943 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
944 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
945 // CHECK1-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
946 // CHECK1-NEXT: store double** [[A]], double*** [[A_ADDR]], align 8
947 // CHECK1-NEXT: store double** [[B]], double*** [[B_ADDR]], align 8
948 // CHECK1-NEXT: store double** [[C]], double*** [[C_ADDR]], align 8
949 // CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
950 // CHECK1-NEXT: [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
951 // CHECK1-NEXT: [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
952 // CHECK1-NEXT: [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
953 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
954 // CHECK1-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
955 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
956 // CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
957 // CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
958 // CHECK1-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
959 // CHECK1-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
960 // CHECK1-NEXT: store i32 0, i32* [[I]], align 4
961 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
962 // CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
963 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
964 // CHECK1: omp.precond.then:
965 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
966 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
967 // CHECK1-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
968 // CHECK1-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
969 // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
970 // CHECK1-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
971 // CHECK1-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
972 // CHECK1-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
973 // CHECK1-NEXT: store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
974 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
975 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
976 // CHECK1-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
977 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
978 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
979 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
980 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
981 // CHECK1-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
982 // CHECK1-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
983 // CHECK1: cond.true:
984 // CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
985 // CHECK1-NEXT: br label [[COND_END:%.*]]
986 // CHECK1: cond.false:
987 // CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
988 // CHECK1-NEXT: br label [[COND_END]]
989 // CHECK1: cond.end:
990 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
991 // CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
992 // CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
993 // CHECK1-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
994 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
995 // CHECK1: omp.inner.for.cond:
996 // CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
997 // CHECK1-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !14
998 // CHECK1-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
999 // CHECK1-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1000 // CHECK1: omp.inner.for.body:
1001 // CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
1002 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
1003 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1004 // CHECK1-NEXT: store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !14
1005 // CHECK1-NEXT: [[TMP20:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !14
1006 // CHECK1-NEXT: [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !14
1007 // CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
1008 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i64 [[IDXPROM]]
1009 // CHECK1-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !14
1010 // CHECK1-NEXT: [[TMP23:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !14
1011 // CHECK1-NEXT: [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !14
1012 // CHECK1-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
1013 // CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM7]]
1014 // CHECK1-NEXT: [[TMP25:%.*]] = load double, double* [[ARRAYIDX8]], align 8, !llvm.access.group !14
1015 // CHECK1-NEXT: [[ADD9:%.*]] = fadd double [[TMP22]], [[TMP25]]
1016 // CHECK1-NEXT: [[TMP26:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !14
1017 // CHECK1-NEXT: [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !14
1018 // CHECK1-NEXT: [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
1019 // CHECK1-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM10]]
1020 // CHECK1-NEXT: store double [[ADD9]], double* [[ARRAYIDX11]], align 8, !llvm.access.group !14
1021 // CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0
1022 // CHECK1-NEXT: store double** [[TMP1]], double*** [[TMP28]], align 8, !llvm.access.group !14
1023 // CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 1
1024 // CHECK1-NEXT: store i32* [[I4]], i32** [[TMP29]], align 8, !llvm.access.group !14
1025 // CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 2
1026 // CHECK1-NEXT: store double** [[TMP2]], double*** [[TMP30]], align 8, !llvm.access.group !14
1027 // CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 3
1028 // CHECK1-NEXT: store double** [[TMP3]], double*** [[TMP31]], align 8, !llvm.access.group !14
1029 // CHECK1-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(%class.anon.0* noundef nonnull align 8 dereferenceable(32) [[REF_TMP]]), !llvm.access.group !14
1030 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
1031 // CHECK1: omp.body.continue:
1032 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
1033 // CHECK1: omp.inner.for.inc:
1034 // CHECK1-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
1035 // CHECK1-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP32]], 1
1036 // CHECK1-NEXT: store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
1037 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]]
1038 // CHECK1: omp.inner.for.end:
1039 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
1040 // CHECK1: omp.loop.exit:
1041 // CHECK1-NEXT: [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1042 // CHECK1-NEXT: [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
1043 // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
1044 // CHECK1-NEXT: [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1045 // CHECK1-NEXT: [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
1046 // CHECK1-NEXT: br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1047 // CHECK1: .omp.final.then:
1048 // CHECK1-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1049 // CHECK1-NEXT: [[SUB13:%.*]] = sub nsw i32 [[TMP37]], 0
1050 // CHECK1-NEXT: [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
1051 // CHECK1-NEXT: [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
1052 // CHECK1-NEXT: [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
1053 // CHECK1-NEXT: store i32 [[ADD16]], i32* [[I4]], align 4
1054 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
1055 // CHECK1: .omp.final.done:
1056 // CHECK1-NEXT: br label [[OMP_PRECOND_END]]
1057 // CHECK1: omp.precond.end:
1058 // CHECK1-NEXT: ret void
1059 //
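// @.omp_outlined..1 above is the 'parallel for simd' level: it truncates the
// incoming PrevLB/PrevUB back to i32, copies them into .omp.lb/.omp.ub, runs
// __kmpc_for_static_init_4 (schedule value 34), clamps the resulting UB
// against the global trip count, and executes the body. Loads and stores in
// the loop carry !llvm.access.group metadata and the backedge carries an
// !llvm.loop annotation, reflecting the simd part of the combined directive;
// the inner lambda's captures (a, i, b, c) are packed into [[CLASS_ANON_0]]
// before the call to its operator().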
1060 //
1061 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159
1062 // CHECK1-SAME: (i64 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR2]] {
1063 // CHECK1-NEXT: entry:
1064 // CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
1065 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca double*, align 8
1066 // CHECK1-NEXT: [[B_ADDR:%.*]] = alloca double*, align 8
1067 // CHECK1-NEXT: [[C_ADDR:%.*]] = alloca double*, align 8
1068 // CHECK1-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
1069 // CHECK1-NEXT: store double* [[A]], double** [[A_ADDR]], align 8
1070 // CHECK1-NEXT: store double* [[B]], double** [[B_ADDR]], align 8
1071 // CHECK1-NEXT: store double* [[C]], double** [[C_ADDR]], align 8
1072 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
1073 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32* [[CONV]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
1074 // CHECK1-NEXT: ret void
1075 //
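// The line-159 target region (dist_schedule(static)) is lowered the same way
// as the default case above, as the source comments note; @.omp_outlined..2
// and @.omp_outlined..3 below mirror @.omp_outlined. and @.omp_outlined..1.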
1076 //
1077 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..2
1078 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
1079 // CHECK1-NEXT: entry:
1080 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1081 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1082 // CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
1083 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca double**, align 8
1084 // CHECK1-NEXT: [[B_ADDR:%.*]] = alloca double**, align 8
1085 // CHECK1-NEXT: [[C_ADDR:%.*]] = alloca double**, align 8
1086 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
1087 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
1088 // CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
1089 // CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
1090 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
1091 // CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
1092 // CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
1093 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1094 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1095 // CHECK1-NEXT: [[I3:%.*]] = alloca i32, align 4
1096 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1097 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1098 // CHECK1-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
1099 // CHECK1-NEXT: store double** [[A]], double*** [[A_ADDR]], align 8
1100 // CHECK1-NEXT: store double** [[B]], double*** [[B_ADDR]], align 8
1101 // CHECK1-NEXT: store double** [[C]], double*** [[C_ADDR]], align 8
1102 // CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
1103 // CHECK1-NEXT: [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
1104 // CHECK1-NEXT: [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
1105 // CHECK1-NEXT: [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
1106 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
1107 // CHECK1-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
1108 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1109 // CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
1110 // CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
1111 // CHECK1-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
1112 // CHECK1-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
1113 // CHECK1-NEXT: store i32 0, i32* [[I]], align 4
1114 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1115 // CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
1116 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
1117 // CHECK1: omp.precond.then:
1118 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
1119 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1120 // CHECK1-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
1121 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1122 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1123 // CHECK1-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1124 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
1125 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1126 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1127 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1128 // CHECK1-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
1129 // CHECK1-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1130 // CHECK1: cond.true:
1131 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1132 // CHECK1-NEXT: br label [[COND_END:%.*]]
1133 // CHECK1: cond.false:
1134 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1135 // CHECK1-NEXT: br label [[COND_END]]
1136 // CHECK1: cond.end:
1137 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
1138 // CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
1139 // CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
1140 // CHECK1-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
1141 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
1142 // CHECK1: omp.inner.for.cond:
1143 // CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
1144 // CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !19
1145 // CHECK1-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
1146 // CHECK1-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1147 // CHECK1: omp.inner.for.body:
1148 // CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !19
1149 // CHECK1-NEXT: [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
1150 // CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !19
1151 // CHECK1-NEXT: [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
1152 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !19
1153 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
1154 // CHECK1: omp.inner.for.inc:
1155 // CHECK1-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
1156 // CHECK1-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !19
1157 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
1158 // CHECK1-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
1159 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]]
1160 // CHECK1: omp.inner.for.end:
1161 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
1162 // CHECK1: omp.loop.exit:
1163 // CHECK1-NEXT: [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1164 // CHECK1-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
1165 // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
1166 // CHECK1-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1167 // CHECK1-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
1168 // CHECK1-NEXT: br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1169 // CHECK1: .omp.final.then:
1170 // CHECK1-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1171 // CHECK1-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
1172 // CHECK1-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
1173 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
1174 // CHECK1-NEXT: [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
1175 // CHECK1-NEXT: store i32 [[ADD8]], i32* [[I3]], align 4
1176 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
1177 // CHECK1: .omp.final.done:
1178 // CHECK1-NEXT: br label [[OMP_PRECOND_END]]
1179 // CHECK1: omp.precond.end:
1180 // CHECK1-NEXT: ret void
1181 //
1182 //
1183 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..3
1184 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
1185 // CHECK1-NEXT: entry:
1186 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1187 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1188 // CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
1189 // CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
1190 // CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
1191 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca double**, align 8
1192 // CHECK1-NEXT: [[B_ADDR:%.*]] = alloca double**, align 8
1193 // CHECK1-NEXT: [[C_ADDR:%.*]] = alloca double**, align 8
1194 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
1195 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
1196 // CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
1197 // CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
1198 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
1199 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
1200 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
1201 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1202 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1203 // CHECK1-NEXT: [[I4:%.*]] = alloca i32, align 4
1204 // CHECK1-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON_1:%.*]], align 8
1205 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1206 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1207 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
1208 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
1209 // CHECK1-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
1210 // CHECK1-NEXT: store double** [[A]], double*** [[A_ADDR]], align 8
1211 // CHECK1-NEXT: store double** [[B]], double*** [[B_ADDR]], align 8
1212 // CHECK1-NEXT: store double** [[C]], double*** [[C_ADDR]], align 8
1213 // CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
1214 // CHECK1-NEXT: [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
1215 // CHECK1-NEXT: [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
1216 // CHECK1-NEXT: [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
1217 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
1218 // CHECK1-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
1219 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1220 // CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
1221 // CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
1222 // CHECK1-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
1223 // CHECK1-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
1224 // CHECK1-NEXT: store i32 0, i32* [[I]], align 4
1225 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1226 // CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
1227 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
1228 // CHECK1: omp.precond.then:
1229 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
1230 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1231 // CHECK1-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
1232 // CHECK1-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
1233 // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
1234 // CHECK1-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
1235 // CHECK1-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
1236 // CHECK1-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
1237 // CHECK1-NEXT: store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
1238 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1239 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1240 // CHECK1-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1241 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
1242 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1243 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1244 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1245 // CHECK1-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
1246 // CHECK1-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1247 // CHECK1: cond.true:
1248 // CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1249 // CHECK1-NEXT: br label [[COND_END:%.*]]
1250 // CHECK1: cond.false:
1251 // CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1252 // CHECK1-NEXT: br label [[COND_END]]
1253 // CHECK1: cond.end:
1254 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
1255 // CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
1256 // CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
1257 // CHECK1-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
1258 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
1259 // CHECK1: omp.inner.for.cond:
1260 // CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
1261 // CHECK1-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !22
1262 // CHECK1-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
1263 // CHECK1-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1264 // CHECK1: omp.inner.for.body:
1265 // CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
1266 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
1267 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1268 // CHECK1-NEXT: store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !22
1269 // CHECK1-NEXT: [[TMP20:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !22
1270 // CHECK1-NEXT: [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !22
1271 // CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
1272 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i64 [[IDXPROM]]
1273 // CHECK1-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !22
1274 // CHECK1-NEXT: [[TMP23:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !22
1275 // CHECK1-NEXT: [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !22
1276 // CHECK1-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
1277 // CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM7]]
1278 // CHECK1-NEXT: [[TMP25:%.*]] = load double, double* [[ARRAYIDX8]], align 8, !llvm.access.group !22
1279 // CHECK1-NEXT: [[ADD9:%.*]] = fadd double [[TMP22]], [[TMP25]]
1280 // CHECK1-NEXT: [[TMP26:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !22
1281 // CHECK1-NEXT: [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !22
1282 // CHECK1-NEXT: [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
1283 // CHECK1-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM10]]
1284 // CHECK1-NEXT: store double [[ADD9]], double* [[ARRAYIDX11]], align 8, !llvm.access.group !22
1285 // CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 0
1286 // CHECK1-NEXT: store double** [[TMP1]], double*** [[TMP28]], align 8, !llvm.access.group !22
1287 // CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 1
1288 // CHECK1-NEXT: store i32* [[I4]], i32** [[TMP29]], align 8, !llvm.access.group !22
1289 // CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 2
1290 // CHECK1-NEXT: store double** [[TMP2]], double*** [[TMP30]], align 8, !llvm.access.group !22
1291 // CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 3
1292 // CHECK1-NEXT: store double** [[TMP3]], double*** [[TMP31]], align 8, !llvm.access.group !22
1293 // CHECK1-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE0_clEv"(%class.anon.1* noundef nonnull align 8 dereferenceable(32) [[REF_TMP]]), !llvm.access.group !22
1294 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
1295 // CHECK1: omp.body.continue:
1296 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
1297 // CHECK1: omp.inner.for.inc:
1298 // CHECK1-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
1299 // CHECK1-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP32]], 1
1300 // CHECK1-NEXT: store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
1301 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]]
1302 // CHECK1: omp.inner.for.end:
1303 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
1304 // CHECK1: omp.loop.exit:
1305 // CHECK1-NEXT: [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1306 // CHECK1-NEXT: [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
1307 // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
1308 // CHECK1-NEXT: [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1309 // CHECK1-NEXT: [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
1310 // CHECK1-NEXT: br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1311 // CHECK1: .omp.final.then:
1312 // CHECK1-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1313 // CHECK1-NEXT: [[SUB13:%.*]] = sub nsw i32 [[TMP37]], 0
1314 // CHECK1-NEXT: [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
1315 // CHECK1-NEXT: [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
1316 // CHECK1-NEXT: [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
1317 // CHECK1-NEXT: store i32 [[ADD16]], i32* [[I4]], align 4
1318 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
1319 // CHECK1: .omp.final.done:
1320 // CHECK1-NEXT: br label [[OMP_PRECOND_END]]
1321 // CHECK1: omp.precond.end:
1322 // CHECK1-NEXT: ret void
1323 //
1324 //
1325 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l201
1326 // CHECK1-SAME: (i64 noundef [[CH:%.*]], i64 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR2]] {
1327 // CHECK1-NEXT: entry:
1328 // CHECK1-NEXT: [[CH_ADDR:%.*]] = alloca i64, align 8
1329 // CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
1330 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca double*, align 8
1331 // CHECK1-NEXT: [[B_ADDR:%.*]] = alloca double*, align 8
1332 // CHECK1-NEXT: [[C_ADDR:%.*]] = alloca double*, align 8
1333 // CHECK1-NEXT: store i64 [[CH]], i64* [[CH_ADDR]], align 8
1334 // CHECK1-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
1335 // CHECK1-NEXT: store double* [[A]], double** [[A_ADDR]], align 8
1336 // CHECK1-NEXT: store double* [[B]], double** [[B_ADDR]], align 8
1337 // CHECK1-NEXT: store double* [[C]], double** [[C_ADDR]], align 8
1338 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
1339 // CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
1340 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
1341 // CHECK1-NEXT: ret void
1342 //
1343 //
1344 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..6
1345 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
1346 // CHECK1-NEXT: entry:
1347 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1348 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1349 // CHECK1-NEXT: [[CH_ADDR:%.*]] = alloca i32*, align 8
1350 // CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
1351 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca double**, align 8
1352 // CHECK1-NEXT: [[B_ADDR:%.*]] = alloca double**, align 8
1353 // CHECK1-NEXT: [[C_ADDR:%.*]] = alloca double**, align 8
1354 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
1355 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
1356 // CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
1357 // CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
1358 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
1359 // CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
1360 // CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
1361 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1362 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1363 // CHECK1-NEXT: [[I3:%.*]] = alloca i32, align 4
1364 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1365 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1366 // CHECK1-NEXT: store i32* [[CH]], i32** [[CH_ADDR]], align 8
1367 // CHECK1-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
1368 // CHECK1-NEXT: store double** [[A]], double*** [[A_ADDR]], align 8
1369 // CHECK1-NEXT: store double** [[B]], double*** [[B_ADDR]], align 8
1370 // CHECK1-NEXT: store double** [[C]], double*** [[C_ADDR]], align 8
1371 // CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
1372 // CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
1373 // CHECK1-NEXT: [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 8
1374 // CHECK1-NEXT: [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 8
1375 // CHECK1-NEXT: [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 8
1376 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP1]], align 4
1377 // CHECK1-NEXT: store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
1378 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1379 // CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP6]], 0
1380 // CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
1381 // CHECK1-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
1382 // CHECK1-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
1383 // CHECK1-NEXT: store i32 0, i32* [[I]], align 4
1384 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1385 // CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP7]]
1386 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
1387 // CHECK1: omp.precond.then:
1388 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
1389 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1390 // CHECK1-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_COMB_UB]], align 4
1391 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1392 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1393 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP0]], align 4
1394 // CHECK1-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1395 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
1396 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP9]])
1397 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1398 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1399 // CHECK1-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
1400 // CHECK1-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1401 // CHECK1: cond.true:
1402 // CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1403 // CHECK1-NEXT: br label [[COND_END:%.*]]
1404 // CHECK1: cond.false:
1405 // CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1406 // CHECK1-NEXT: br label [[COND_END]]
1407 // CHECK1: cond.end:
1408 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
1409 // CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
1410 // CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
1411 // CHECK1-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
1412 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
1413 // CHECK1: omp.inner.for.cond:
1414 // CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
1415 // CHECK1-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !25
1416 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP18]], 1
1417 // CHECK1-NEXT: [[CMP5:%.*]] = icmp slt i32 [[TMP17]], [[ADD]]
1418 // CHECK1-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1419 // CHECK1: omp.inner.for.body:
1420 // CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !25
1421 // CHECK1-NEXT: [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
1422 // CHECK1-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !25
1423 // CHECK1-NEXT: [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
1424 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]]), !llvm.access.group !25
1425 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
1426 // CHECK1: omp.inner.for.inc:
1427 // CHECK1-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
1428 // CHECK1-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !25
1429 // CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
1430 // CHECK1-NEXT: store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
1431 // CHECK1-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !25
1432 // CHECK1-NEXT: [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !25
1433 // CHECK1-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
1434 // CHECK1-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !25
1435 // CHECK1-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !25
1436 // CHECK1-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !25
1437 // CHECK1-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP27]], [[TMP28]]
1438 // CHECK1-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !25
1439 // CHECK1-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !25
1440 // CHECK1-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !25
1441 // CHECK1-NEXT: [[CMP9:%.*]] = icmp sgt i32 [[TMP29]], [[TMP30]]
1442 // CHECK1-NEXT: br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
1443 // CHECK1: cond.true10:
1444 // CHECK1-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !25
1445 // CHECK1-NEXT: br label [[COND_END12:%.*]]
1446 // CHECK1: cond.false11:
1447 // CHECK1-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !25
1448 // CHECK1-NEXT: br label [[COND_END12]]
1449 // CHECK1: cond.end12:
1450 // CHECK1-NEXT: [[COND13:%.*]] = phi i32 [ [[TMP31]], [[COND_TRUE10]] ], [ [[TMP32]], [[COND_FALSE11]] ]
1451 // CHECK1-NEXT: store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !25
1452 // CHECK1-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !25
1453 // CHECK1-NEXT: store i32 [[TMP33]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
1454 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
1455 // CHECK1: omp.inner.for.end:
1456 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
1457 // CHECK1: omp.loop.exit:
1458 // CHECK1-NEXT: [[TMP34:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1459 // CHECK1-NEXT: [[TMP35:%.*]] = load i32, i32* [[TMP34]], align 4
1460 // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP35]])
1461 // CHECK1-NEXT: [[TMP36:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1462 // CHECK1-NEXT: [[TMP37:%.*]] = icmp ne i32 [[TMP36]], 0
1463 // CHECK1-NEXT: br i1 [[TMP37]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1464 // CHECK1: .omp.final.then:
1465 // CHECK1-NEXT: [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1466 // CHECK1-NEXT: [[SUB14:%.*]] = sub nsw i32 [[TMP38]], 0
1467 // CHECK1-NEXT: [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
1468 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV15]], 1
1469 // CHECK1-NEXT: [[ADD16:%.*]] = add nsw i32 0, [[MUL]]
1470 // CHECK1-NEXT: store i32 [[ADD16]], i32* [[I3]], align 4
1471 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
1472 // CHECK1: .omp.final.done:
1473 // CHECK1-NEXT: br label [[OMP_PRECOND_END]]
1474 // CHECK1: omp.precond.end:
1475 // CHECK1-NEXT: ret void
1476 //
1477 //
1478 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..7
1479 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
1480 // CHECK1-NEXT: entry:
1481 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1482 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1483 // CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
1484 // CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
1485 // CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
1486 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca double**, align 8
1487 // CHECK1-NEXT: [[B_ADDR:%.*]] = alloca double**, align 8
1488 // CHECK1-NEXT: [[C_ADDR:%.*]] = alloca double**, align 8
1489 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
1490 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
1491 // CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
1492 // CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
1493 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
1494 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
1495 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
1496 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1497 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1498 // CHECK1-NEXT: [[I4:%.*]] = alloca i32, align 4
1499 // CHECK1-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON_2:%.*]], align 8
1500 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1501 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1502 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
1503 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
1504 // CHECK1-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
1505 // CHECK1-NEXT: store double** [[A]], double*** [[A_ADDR]], align 8
1506 // CHECK1-NEXT: store double** [[B]], double*** [[B_ADDR]], align 8
1507 // CHECK1-NEXT: store double** [[C]], double*** [[C_ADDR]], align 8
1508 // CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
1509 // CHECK1-NEXT: [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
1510 // CHECK1-NEXT: [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
1511 // CHECK1-NEXT: [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
1512 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
1513 // CHECK1-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
1514 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1515 // CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
1516 // CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
1517 // CHECK1-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
1518 // CHECK1-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
1519 // CHECK1-NEXT: store i32 0, i32* [[I]], align 4
1520 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1521 // CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
1522 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
1523 // CHECK1: omp.precond.then:
1524 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
1525 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1526 // CHECK1-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
1527 // CHECK1-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
1528 // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
1529 // CHECK1-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
1530 // CHECK1-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
1531 // CHECK1-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
1532 // CHECK1-NEXT: store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
1533 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1534 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1535 // CHECK1-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1536 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
1537 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1538 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1539 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1540 // CHECK1-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
1541 // CHECK1-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1542 // CHECK1: cond.true:
1543 // CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1544 // CHECK1-NEXT: br label [[COND_END:%.*]]
1545 // CHECK1: cond.false:
1546 // CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1547 // CHECK1-NEXT: br label [[COND_END]]
1548 // CHECK1: cond.end:
1549 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
1550 // CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
1551 // CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
1552 // CHECK1-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
1553 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
1554 // CHECK1: omp.inner.for.cond:
1555 // CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
1556 // CHECK1-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !28
1557 // CHECK1-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
1558 // CHECK1-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1559 // CHECK1: omp.inner.for.body:
1560 // CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
1561 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
1562 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1563 // CHECK1-NEXT: store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !28
1564 // CHECK1-NEXT: [[TMP20:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !28
1565 // CHECK1-NEXT: [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !28
1566 // CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
1567 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i64 [[IDXPROM]]
1568 // CHECK1-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !28
1569 // CHECK1-NEXT: [[TMP23:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !28
1570 // CHECK1-NEXT: [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !28
1571 // CHECK1-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
1572 // CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM7]]
1573 // CHECK1-NEXT: [[TMP25:%.*]] = load double, double* [[ARRAYIDX8]], align 8, !llvm.access.group !28
1574 // CHECK1-NEXT: [[ADD9:%.*]] = fadd double [[TMP22]], [[TMP25]]
1575 // CHECK1-NEXT: [[TMP26:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !28
1576 // CHECK1-NEXT: [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !28
1577 // CHECK1-NEXT: [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
1578 // CHECK1-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM10]]
1579 // CHECK1-NEXT: store double [[ADD9]], double* [[ARRAYIDX11]], align 8, !llvm.access.group !28
1580 // CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[CLASS_ANON_2]], %class.anon.2* [[REF_TMP]], i32 0, i32 0
1581 // CHECK1-NEXT: store double** [[TMP1]], double*** [[TMP28]], align 8, !llvm.access.group !28
1582 // CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_2]], %class.anon.2* [[REF_TMP]], i32 0, i32 1
1583 // CHECK1-NEXT: store i32* [[I4]], i32** [[TMP29]], align 8, !llvm.access.group !28
1584 // CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_2]], %class.anon.2* [[REF_TMP]], i32 0, i32 2
1585 // CHECK1-NEXT: store double** [[TMP2]], double*** [[TMP30]], align 8, !llvm.access.group !28
1586 // CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_2]], %class.anon.2* [[REF_TMP]], i32 0, i32 3
1587 // CHECK1-NEXT: store double** [[TMP3]], double*** [[TMP31]], align 8, !llvm.access.group !28
1588 // CHECK1-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE1_clEv"(%class.anon.2* noundef nonnull align 8 dereferenceable(32) [[REF_TMP]]), !llvm.access.group !28
1589 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
1590 // CHECK1: omp.body.continue:
1591 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
1592 // CHECK1: omp.inner.for.inc:
1593 // CHECK1-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
1594 // CHECK1-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP32]], 1
1595 // CHECK1-NEXT: store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
1596 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]]
1597 // CHECK1: omp.inner.for.end:
1598 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
1599 // CHECK1: omp.loop.exit:
1600 // CHECK1-NEXT: [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1601 // CHECK1-NEXT: [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
1602 // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
1603 // CHECK1-NEXT: [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1604 // CHECK1-NEXT: [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
1605 // CHECK1-NEXT: br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1606 // CHECK1: .omp.final.then:
1607 // CHECK1-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1608 // CHECK1-NEXT: [[SUB13:%.*]] = sub nsw i32 [[TMP37]], 0
1609 // CHECK1-NEXT: [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
1610 // CHECK1-NEXT: [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
1611 // CHECK1-NEXT: [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
1612 // CHECK1-NEXT: store i32 [[ADD16]], i32* [[I4]], align 4
1613 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
1614 // CHECK1: .omp.final.done:
1615 // CHECK1-NEXT: br label [[OMP_PRECOND_END]]
1616 // CHECK1: omp.precond.end:
1617 // CHECK1-NEXT: ret void
1618 //
1619 //
1620 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l234
1621 // CHECK1-SAME: (i64 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR2]] {
1622 // CHECK1-NEXT: entry:
1623 // CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
1624 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca double*, align 8
1625 // CHECK1-NEXT: [[B_ADDR:%.*]] = alloca double*, align 8
1626 // CHECK1-NEXT: [[C_ADDR:%.*]] = alloca double*, align 8
1627 // CHECK1-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
1628 // CHECK1-NEXT: store double* [[A]], double** [[A_ADDR]], align 8
1629 // CHECK1-NEXT: store double* [[B]], double** [[B_ADDR]], align 8
1630 // CHECK1-NEXT: store double* [[C]], double** [[C_ADDR]], align 8
1631 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
1632 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32* [[CONV]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
1633 // CHECK1-NEXT: ret void
1634 //
1635 //
1636 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..10
1637 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
1638 // CHECK1-NEXT: entry:
1639 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1640 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1641 // CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
1642 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca double**, align 8
1643 // CHECK1-NEXT: [[B_ADDR:%.*]] = alloca double**, align 8
1644 // CHECK1-NEXT: [[C_ADDR:%.*]] = alloca double**, align 8
1645 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
1646 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
1647 // CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
1648 // CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
1649 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
1650 // CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
1651 // CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
1652 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1653 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1654 // CHECK1-NEXT: [[I3:%.*]] = alloca i32, align 4
1655 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1656 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1657 // CHECK1-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
1658 // CHECK1-NEXT: store double** [[A]], double*** [[A_ADDR]], align 8
1659 // CHECK1-NEXT: store double** [[B]], double*** [[B_ADDR]], align 8
1660 // CHECK1-NEXT: store double** [[C]], double*** [[C_ADDR]], align 8
1661 // CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
1662 // CHECK1-NEXT: [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
1663 // CHECK1-NEXT: [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
1664 // CHECK1-NEXT: [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
1665 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
1666 // CHECK1-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
1667 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1668 // CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
1669 // CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
1670 // CHECK1-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
1671 // CHECK1-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
1672 // CHECK1-NEXT: store i32 0, i32* [[I]], align 4
1673 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1674 // CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
1675 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
1676 // CHECK1: omp.precond.then:
1677 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
1678 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1679 // CHECK1-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
1680 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1681 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1682 // CHECK1-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1683 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
1684 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1685 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1686 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1687 // CHECK1-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
1688 // CHECK1-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1689 // CHECK1: cond.true:
1690 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1691 // CHECK1-NEXT: br label [[COND_END:%.*]]
1692 // CHECK1: cond.false:
1693 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1694 // CHECK1-NEXT: br label [[COND_END]]
1695 // CHECK1: cond.end:
1696 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
1697 // CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
1698 // CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
1699 // CHECK1-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
1700 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
1701 // CHECK1: omp.inner.for.cond:
1702 // CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
1703 // CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !31
1704 // CHECK1-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
1705 // CHECK1-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1706 // CHECK1: omp.inner.for.body:
1707 // CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !31
1708 // CHECK1-NEXT: [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
1709 // CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !31
1710 // CHECK1-NEXT: [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
1711 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !31
1712 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
1713 // CHECK1: omp.inner.for.inc:
1714 // CHECK1-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
1715 // CHECK1-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !31
1716 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
1717 // CHECK1-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
1718 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP32:![0-9]+]]
1719 // CHECK1: omp.inner.for.end:
1720 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
1721 // CHECK1: omp.loop.exit:
1722 // CHECK1-NEXT: [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1723 // CHECK1-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
1724 // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
1725 // CHECK1-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1726 // CHECK1-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
1727 // CHECK1-NEXT: br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1728 // CHECK1: .omp.final.then:
1729 // CHECK1-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1730 // CHECK1-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
1731 // CHECK1-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
1732 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
1733 // CHECK1-NEXT: [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
1734 // CHECK1-NEXT: store i32 [[ADD8]], i32* [[I3]], align 4
1735 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
1736 // CHECK1: .omp.final.done:
1737 // CHECK1-NEXT: br label [[OMP_PRECOND_END]]
1738 // CHECK1: omp.precond.end:
1739 // CHECK1-NEXT: ret void
1740 //
1741 //
1742 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..11
1743 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
1744 // CHECK1-NEXT: entry:
1745 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1746 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1747 // CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
1748 // CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
1749 // CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
1750 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca double**, align 8
1751 // CHECK1-NEXT: [[B_ADDR:%.*]] = alloca double**, align 8
1752 // CHECK1-NEXT: [[C_ADDR:%.*]] = alloca double**, align 8
1753 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
1754 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
1755 // CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
1756 // CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
1757 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
1758 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
1759 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
1760 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1761 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1762 // CHECK1-NEXT: [[I4:%.*]] = alloca i32, align 4
1763 // CHECK1-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON_3:%.*]], align 8
1764 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1765 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1766 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
1767 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
1768 // CHECK1-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
1769 // CHECK1-NEXT: store double** [[A]], double*** [[A_ADDR]], align 8
1770 // CHECK1-NEXT: store double** [[B]], double*** [[B_ADDR]], align 8
1771 // CHECK1-NEXT: store double** [[C]], double*** [[C_ADDR]], align 8
1772 // CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
1773 // CHECK1-NEXT: [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
1774 // CHECK1-NEXT: [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
1775 // CHECK1-NEXT: [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
1776 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
1777 // CHECK1-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
1778 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1779 // CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
1780 // CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
1781 // CHECK1-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
1782 // CHECK1-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
1783 // CHECK1-NEXT: store i32 0, i32* [[I]], align 4
1784 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1785 // CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
1786 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
1787 // CHECK1: omp.precond.then:
1788 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
1789 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1790 // CHECK1-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
1791 // CHECK1-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
1792 // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
1793 // CHECK1-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
1794 // CHECK1-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
1795 // CHECK1-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
1796 // CHECK1-NEXT: store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
1797 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1798 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1799 // CHECK1-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1800 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
1801 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1802 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1803 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1804 // CHECK1-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
1805 // CHECK1-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1806 // CHECK1: cond.true:
1807 // CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1808 // CHECK1-NEXT: br label [[COND_END:%.*]]
1809 // CHECK1: cond.false:
1810 // CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1811 // CHECK1-NEXT: br label [[COND_END]]
1812 // CHECK1: cond.end:
1813 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
1814 // CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
1815 // CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
1816 // CHECK1-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
1817 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
1818 // CHECK1: omp.inner.for.cond:
1819 // CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
1820 // CHECK1-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !34
1821 // CHECK1-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
1822 // CHECK1-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1823 // CHECK1: omp.inner.for.body:
1824 // CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
1825 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
1826 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1827 // CHECK1-NEXT: store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !34
1828 // CHECK1-NEXT: [[TMP20:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !34
1829 // CHECK1-NEXT: [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !34
1830 // CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
1831 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i64 [[IDXPROM]]
1832 // CHECK1-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !34
1833 // CHECK1-NEXT: [[TMP23:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !34
1834 // CHECK1-NEXT: [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !34
1835 // CHECK1-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
1836 // CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM7]]
1837 // CHECK1-NEXT: [[TMP25:%.*]] = load double, double* [[ARRAYIDX8]], align 8, !llvm.access.group !34
1838 // CHECK1-NEXT: [[ADD9:%.*]] = fadd double [[TMP22]], [[TMP25]]
1839 // CHECK1-NEXT: [[TMP26:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !34
1840 // CHECK1-NEXT: [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !34
1841 // CHECK1-NEXT: [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
1842 // CHECK1-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM10]]
1843 // CHECK1-NEXT: store double [[ADD9]], double* [[ARRAYIDX11]], align 8, !llvm.access.group !34
1844 // CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[CLASS_ANON_3]], %class.anon.3* [[REF_TMP]], i32 0, i32 0
1845 // CHECK1-NEXT: store double** [[TMP1]], double*** [[TMP28]], align 8, !llvm.access.group !34
1846 // CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_3]], %class.anon.3* [[REF_TMP]], i32 0, i32 1
1847 // CHECK1-NEXT: store i32* [[I4]], i32** [[TMP29]], align 8, !llvm.access.group !34
1848 // CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_3]], %class.anon.3* [[REF_TMP]], i32 0, i32 2
1849 // CHECK1-NEXT: store double** [[TMP2]], double*** [[TMP30]], align 8, !llvm.access.group !34
1850 // CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_3]], %class.anon.3* [[REF_TMP]], i32 0, i32 3
1851 // CHECK1-NEXT: store double** [[TMP3]], double*** [[TMP31]], align 8, !llvm.access.group !34
1852 // CHECK1-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE2_clEv"(%class.anon.3* noundef nonnull align 8 dereferenceable(32) [[REF_TMP]]), !llvm.access.group !34
1853 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
1854 // CHECK1: omp.body.continue:
1855 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
1856 // CHECK1: omp.inner.for.inc:
1857 // CHECK1-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
1858 // CHECK1-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP32]], 1
1859 // CHECK1-NEXT: store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
1860 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP35:![0-9]+]]
1861 // CHECK1: omp.inner.for.end:
1862 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
1863 // CHECK1: omp.loop.exit:
1864 // CHECK1-NEXT: [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1865 // CHECK1-NEXT: [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
1866 // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
1867 // CHECK1-NEXT: [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1868 // CHECK1-NEXT: [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
1869 // CHECK1-NEXT: br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1870 // CHECK1: .omp.final.then:
1871 // CHECK1-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1872 // CHECK1-NEXT: [[SUB13:%.*]] = sub nsw i32 [[TMP37]], 0
1873 // CHECK1-NEXT: [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
1874 // CHECK1-NEXT: [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
1875 // CHECK1-NEXT: [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
1876 // CHECK1-NEXT: store i32 [[ADD16]], i32* [[I4]], align 4
1877 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
1878 // CHECK1: .omp.final.done:
1879 // CHECK1-NEXT: br label [[OMP_PRECOND_END]]
1880 // CHECK1: omp.precond.end:
1881 // CHECK1-NEXT: ret void
1882 //
1883 //
1884 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l266
1885 // CHECK1-SAME: (i64 noundef [[CH:%.*]], i64 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR2]] {
1886 // CHECK1-NEXT: entry:
1887 // CHECK1-NEXT: [[CH_ADDR:%.*]] = alloca i64, align 8
1888 // CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
1889 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca double*, align 8
1890 // CHECK1-NEXT: [[B_ADDR:%.*]] = alloca double*, align 8
1891 // CHECK1-NEXT: [[C_ADDR:%.*]] = alloca double*, align 8
1892 // CHECK1-NEXT: store i64 [[CH]], i64* [[CH_ADDR]], align 8
1893 // CHECK1-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
1894 // CHECK1-NEXT: store double* [[A]], double** [[A_ADDR]], align 8
1895 // CHECK1-NEXT: store double* [[B]], double** [[B_ADDR]], align 8
1896 // CHECK1-NEXT: store double* [[C]], double** [[C_ADDR]], align 8
1897 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
1898 // CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
1899 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
1900 // CHECK1-NEXT: ret void
1901 //
1902 //
1903 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..14
1904 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
1905 // CHECK1-NEXT: entry:
1906 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1907 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1908 // CHECK1-NEXT: [[CH_ADDR:%.*]] = alloca i32*, align 8
1909 // CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
1910 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca double**, align 8
1911 // CHECK1-NEXT: [[B_ADDR:%.*]] = alloca double**, align 8
1912 // CHECK1-NEXT: [[C_ADDR:%.*]] = alloca double**, align 8
1913 // CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
1914 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
1915 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
1916 // CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
1917 // CHECK1-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
1918 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
1919 // CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
1920 // CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
1921 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1922 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1923 // CHECK1-NEXT: [[I4:%.*]] = alloca i32, align 4
1924 // CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
1925 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1926 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1927 // CHECK1-NEXT: store i32* [[CH]], i32** [[CH_ADDR]], align 8
1928 // CHECK1-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
1929 // CHECK1-NEXT: store double** [[A]], double*** [[A_ADDR]], align 8
1930 // CHECK1-NEXT: store double** [[B]], double*** [[B_ADDR]], align 8
1931 // CHECK1-NEXT: store double** [[C]], double*** [[C_ADDR]], align 8
1932 // CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
1933 // CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
1934 // CHECK1-NEXT: [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 8
1935 // CHECK1-NEXT: [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 8
1936 // CHECK1-NEXT: [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 8
1937 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
1938 // CHECK1-NEXT: store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
1939 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
1940 // CHECK1-NEXT: store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
1941 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1942 // CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
1943 // CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
1944 // CHECK1-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
1945 // CHECK1-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
1946 // CHECK1-NEXT: store i32 0, i32* [[I]], align 4
1947 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1948 // CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
1949 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
1950 // CHECK1: omp.precond.then:
1951 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
1952 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
1953 // CHECK1-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
1954 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1955 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1956 // CHECK1-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1957 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
1958 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1959 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1960 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
1961 // CHECK1-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
1962 // CHECK1-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1963 // CHECK1: cond.true:
1964 // CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
1965 // CHECK1-NEXT: br label [[COND_END:%.*]]
1966 // CHECK1: cond.false:
1967 // CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1968 // CHECK1-NEXT: br label [[COND_END]]
1969 // CHECK1: cond.end:
1970 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
1971 // CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
1972 // CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
1973 // CHECK1-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
1974 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
1975 // CHECK1: omp.inner.for.cond:
1976 // CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !37
1977 // CHECK1-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !37
1978 // CHECK1-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
1979 // CHECK1-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1980 // CHECK1: omp.inner.for.body:
1981 // CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !37
1982 // CHECK1-NEXT: [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
1983 // CHECK1-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !37
1984 // CHECK1-NEXT: [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
1985 // CHECK1-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !37
1986 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
1987 // CHECK1-NEXT: store i32 [[TMP23]], i32* [[CONV]], align 4, !llvm.access.group !37
1988 // CHECK1-NEXT: [[TMP24:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !37
1989 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**, i64)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]], i64 [[TMP24]]), !llvm.access.group !37
1990 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
1991 // CHECK1: omp.inner.for.inc:
1992 // CHECK1-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !37
1993 // CHECK1-NEXT: [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !37
1994 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
1995 // CHECK1-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !37
1996 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP38:![0-9]+]]
1997 // CHECK1: omp.inner.for.end:
1998 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
1999 // CHECK1: omp.loop.exit:
2000 // CHECK1-NEXT: [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2001 // CHECK1-NEXT: [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
2002 // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP28]])
2003 // CHECK1-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2004 // CHECK1-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
2005 // CHECK1-NEXT: br i1 [[TMP30]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2006 // CHECK1: .omp.final.then:
2007 // CHECK1-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2008 // CHECK1-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP31]], 0
2009 // CHECK1-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
2010 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
2011 // CHECK1-NEXT: [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
2012 // CHECK1-NEXT: store i32 [[ADD9]], i32* [[I4]], align 4
2013 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
2014 // CHECK1: .omp.final.done:
2015 // CHECK1-NEXT: br label [[OMP_PRECOND_END]]
2016 // CHECK1: omp.precond.end:
2017 // CHECK1-NEXT: ret void
2018 //
2019 //
2020 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..15
2021 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
2022 // CHECK1-NEXT: entry:
2023 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2024 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2025 // CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
2026 // CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
2027 // CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
2028 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca double**, align 8
2029 // CHECK1-NEXT: [[B_ADDR:%.*]] = alloca double**, align 8
2030 // CHECK1-NEXT: [[C_ADDR:%.*]] = alloca double**, align 8
2031 // CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
2032 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
2033 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
2034 // CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
2035 // CHECK1-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
2036 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
2037 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
2038 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
2039 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2040 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2041 // CHECK1-NEXT: [[I6:%.*]] = alloca i32, align 4
2042 // CHECK1-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON_4:%.*]], align 8
2043 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2044 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2045 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
2046 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
2047 // CHECK1-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
2048 // CHECK1-NEXT: store double** [[A]], double*** [[A_ADDR]], align 8
2049 // CHECK1-NEXT: store double** [[B]], double*** [[B_ADDR]], align 8
2050 // CHECK1-NEXT: store double** [[C]], double*** [[C_ADDR]], align 8
2051 // CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
2052 // CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
2053 // CHECK1-NEXT: [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
2054 // CHECK1-NEXT: [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
2055 // CHECK1-NEXT: [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
2056 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
2057 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
2058 // CHECK1-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
2059 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2060 // CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
2061 // CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
2062 // CHECK1-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
2063 // CHECK1-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
2064 // CHECK1-NEXT: store i32 0, i32* [[I]], align 4
2065 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2066 // CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
2067 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
2068 // CHECK1: omp.precond.then:
2069 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
2070 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
2071 // CHECK1-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
2072 // CHECK1-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
2073 // CHECK1-NEXT: [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
2074 // CHECK1-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
2075 // CHECK1-NEXT: [[CONV5:%.*]] = trunc i64 [[TMP9]] to i32
2076 // CHECK1-NEXT: store i32 [[CONV4]], i32* [[DOTOMP_LB]], align 4
2077 // CHECK1-NEXT: store i32 [[CONV5]], i32* [[DOTOMP_UB]], align 4
2078 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2079 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2080 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[CONV]], align 4
2081 // CHECK1-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2082 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
2083 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP12]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP10]])
2084 // CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
2085 // CHECK1: omp.dispatch.cond:
2086 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2087 // CHECK1-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
2088 // CHECK1-NEXT: [[CONV7:%.*]] = trunc i64 [[TMP14]] to i32
2089 // CHECK1-NEXT: [[CMP8:%.*]] = icmp sgt i32 [[TMP13]], [[CONV7]]
2090 // CHECK1-NEXT: br i1 [[CMP8]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2091 // CHECK1: cond.true:
2092 // CHECK1-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
2093 // CHECK1-NEXT: [[CONV9:%.*]] = trunc i64 [[TMP15]] to i32
2094 // CHECK1-NEXT: br label [[COND_END:%.*]]
2095 // CHECK1: cond.false:
2096 // CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2097 // CHECK1-NEXT: br label [[COND_END]]
2098 // CHECK1: cond.end:
2099 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[CONV9]], [[COND_TRUE]] ], [ [[TMP16]], [[COND_FALSE]] ]
2100 // CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
2101 // CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2102 // CHECK1-NEXT: store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
2103 // CHECK1-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2104 // CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2105 // CHECK1-NEXT: [[CMP10:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
2106 // CHECK1-NEXT: br i1 [[CMP10]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
2107 // CHECK1: omp.dispatch.body:
2108 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
2109 // CHECK1: omp.inner.for.cond:
2110 // CHECK1-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40
2111 // CHECK1-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !40
2112 // CHECK1-NEXT: [[CMP11:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
2113 // CHECK1-NEXT: br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2114 // CHECK1: omp.inner.for.body:
2115 // CHECK1-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40
2116 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP22]], 1
2117 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2118 // CHECK1-NEXT: store i32 [[ADD]], i32* [[I6]], align 4, !llvm.access.group !40
2119 // CHECK1-NEXT: [[TMP23:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !40
2120 // CHECK1-NEXT: [[TMP24:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !40
2121 // CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP24]] to i64
2122 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM]]
2123 // CHECK1-NEXT: [[TMP25:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !40
2124 // CHECK1-NEXT: [[TMP26:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !40
2125 // CHECK1-NEXT: [[TMP27:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !40
2126 // CHECK1-NEXT: [[IDXPROM12:%.*]] = sext i32 [[TMP27]] to i64
2127 // CHECK1-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM12]]
2128 // CHECK1-NEXT: [[TMP28:%.*]] = load double, double* [[ARRAYIDX13]], align 8, !llvm.access.group !40
2129 // CHECK1-NEXT: [[ADD14:%.*]] = fadd double [[TMP25]], [[TMP28]]
2130 // CHECK1-NEXT: [[TMP29:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !40
2131 // CHECK1-NEXT: [[TMP30:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !40
2132 // CHECK1-NEXT: [[IDXPROM15:%.*]] = sext i32 [[TMP30]] to i64
2133 // CHECK1-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds double, double* [[TMP29]], i64 [[IDXPROM15]]
2134 // CHECK1-NEXT: store double [[ADD14]], double* [[ARRAYIDX16]], align 8, !llvm.access.group !40
2135 // CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_4]], %class.anon.4* [[REF_TMP]], i32 0, i32 0
2136 // CHECK1-NEXT: store double** [[TMP1]], double*** [[TMP31]], align 8, !llvm.access.group !40
2137 // CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[CLASS_ANON_4]], %class.anon.4* [[REF_TMP]], i32 0, i32 1
2138 // CHECK1-NEXT: store i32* [[I6]], i32** [[TMP32]], align 8, !llvm.access.group !40
2139 // CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[CLASS_ANON_4]], %class.anon.4* [[REF_TMP]], i32 0, i32 2
2140 // CHECK1-NEXT: store double** [[TMP2]], double*** [[TMP33]], align 8, !llvm.access.group !40
2141 // CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[CLASS_ANON_4]], %class.anon.4* [[REF_TMP]], i32 0, i32 3
2142 // CHECK1-NEXT: store double** [[TMP3]], double*** [[TMP34]], align 8, !llvm.access.group !40
2143 // CHECK1-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE3_clEv"(%class.anon.4* noundef nonnull align 8 dereferenceable(32) [[REF_TMP]]), !llvm.access.group !40
2144 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
2145 // CHECK1: omp.body.continue:
2146 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
2147 // CHECK1: omp.inner.for.inc:
2148 // CHECK1-NEXT: [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40
2149 // CHECK1-NEXT: [[ADD17:%.*]] = add nsw i32 [[TMP35]], 1
2150 // CHECK1-NEXT: store i32 [[ADD17]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40
2151 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP41:![0-9]+]]
2152 // CHECK1: omp.inner.for.end:
2153 // CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
2154 // CHECK1: omp.dispatch.inc:
2155 // CHECK1-NEXT: [[TMP36:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2156 // CHECK1-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
2157 // CHECK1-NEXT: [[ADD18:%.*]] = add nsw i32 [[TMP36]], [[TMP37]]
2158 // CHECK1-NEXT: store i32 [[ADD18]], i32* [[DOTOMP_LB]], align 4
2159 // CHECK1-NEXT: [[TMP38:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2160 // CHECK1-NEXT: [[TMP39:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
2161 // CHECK1-NEXT: [[ADD19:%.*]] = add nsw i32 [[TMP38]], [[TMP39]]
2162 // CHECK1-NEXT: store i32 [[ADD19]], i32* [[DOTOMP_UB]], align 4
2163 // CHECK1-NEXT: br label [[OMP_DISPATCH_COND]]
2164 // CHECK1: omp.dispatch.end:
2165 // CHECK1-NEXT: [[TMP40:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2166 // CHECK1-NEXT: [[TMP41:%.*]] = load i32, i32* [[TMP40]], align 4
2167 // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP41]])
2168 // CHECK1-NEXT: [[TMP42:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2169 // CHECK1-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0
2170 // CHECK1-NEXT: br i1 [[TMP43]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2171 // CHECK1: .omp.final.then:
2172 // CHECK1-NEXT: [[TMP44:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2173 // CHECK1-NEXT: [[SUB20:%.*]] = sub nsw i32 [[TMP44]], 0
2174 // CHECK1-NEXT: [[DIV21:%.*]] = sdiv i32 [[SUB20]], 1
2175 // CHECK1-NEXT: [[MUL22:%.*]] = mul nsw i32 [[DIV21]], 1
2176 // CHECK1-NEXT: [[ADD23:%.*]] = add nsw i32 0, [[MUL22]]
2177 // CHECK1-NEXT: store i32 [[ADD23]], i32* [[I6]], align 4
2178 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
2179 // CHECK1: .omp.final.done:
2180 // CHECK1-NEXT: br label [[OMP_PRECOND_END]]
2181 // CHECK1: omp.precond.end:
2182 // CHECK1-NEXT: ret void
2183 //
2184 //
2185 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l299
2186 // CHECK1-SAME: (i64 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR2]] {
2187 // CHECK1-NEXT: entry:
2188 // CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
2189 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca double*, align 8
2190 // CHECK1-NEXT: [[B_ADDR:%.*]] = alloca double*, align 8
2191 // CHECK1-NEXT: [[C_ADDR:%.*]] = alloca double*, align 8
2192 // CHECK1-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
2193 // CHECK1-NEXT: store double* [[A]], double** [[A_ADDR]], align 8
2194 // CHECK1-NEXT: store double* [[B]], double** [[B_ADDR]], align 8
2195 // CHECK1-NEXT: store double* [[C]], double** [[C_ADDR]], align 8
2196 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
2197 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32* [[CONV]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
2198 // CHECK1-NEXT: ret void
2199 //
2200 //
2201 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..18
2202 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
2203 // CHECK1-NEXT: entry:
2204 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2205 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2206 // CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
2207 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca double**, align 8
2208 // CHECK1-NEXT: [[B_ADDR:%.*]] = alloca double**, align 8
2209 // CHECK1-NEXT: [[C_ADDR:%.*]] = alloca double**, align 8
2210 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
2211 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
2212 // CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
2213 // CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
2214 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
2215 // CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
2216 // CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
2217 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2218 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2219 // CHECK1-NEXT: [[I3:%.*]] = alloca i32, align 4
2220 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2221 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2222 // CHECK1-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
2223 // CHECK1-NEXT: store double** [[A]], double*** [[A_ADDR]], align 8
2224 // CHECK1-NEXT: store double** [[B]], double*** [[B_ADDR]], align 8
2225 // CHECK1-NEXT: store double** [[C]], double*** [[C_ADDR]], align 8
2226 // CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
2227 // CHECK1-NEXT: [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
2228 // CHECK1-NEXT: [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
2229 // CHECK1-NEXT: [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
2230 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
2231 // CHECK1-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
2232 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2233 // CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
2234 // CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
2235 // CHECK1-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
2236 // CHECK1-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
2237 // CHECK1-NEXT: store i32 0, i32* [[I]], align 4
2238 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2239 // CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
2240 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
2241 // CHECK1: omp.precond.then:
2242 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
2243 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2244 // CHECK1-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
2245 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2246 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2247 // CHECK1-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2248 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
2249 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2250 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2251 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2252 // CHECK1-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
2253 // CHECK1-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2254 // CHECK1: cond.true:
2255 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2256 // CHECK1-NEXT: br label [[COND_END:%.*]]
2257 // CHECK1: cond.false:
2258 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2259 // CHECK1-NEXT: br label [[COND_END]]
2260 // CHECK1: cond.end:
2261 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
2262 // CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
2263 // CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
2264 // CHECK1-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
2265 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
2266 // CHECK1: omp.inner.for.cond:
2267 // CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43
2268 // CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !43
2269 // CHECK1-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
2270 // CHECK1-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2271 // CHECK1: omp.inner.for.body:
2272 // CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !43
2273 // CHECK1-NEXT: [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
2274 // CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !43
2275 // CHECK1-NEXT: [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
2276 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !43
2277 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
2278 // CHECK1: omp.inner.for.inc:
2279 // CHECK1-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43
2280 // CHECK1-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !43
2281 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
2282 // CHECK1-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43
2283 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP44:![0-9]+]]
2284 // CHECK1: omp.inner.for.end:
2285 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
2286 // CHECK1: omp.loop.exit:
2287 // CHECK1-NEXT: [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2288 // CHECK1-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
2289 // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
2290 // CHECK1-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2291 // CHECK1-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
2292 // CHECK1-NEXT: br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2293 // CHECK1: .omp.final.then:
2294 // CHECK1-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2295 // CHECK1-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
2296 // CHECK1-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
2297 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
2298 // CHECK1-NEXT: [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
2299 // CHECK1-NEXT: store i32 [[ADD8]], i32* [[I3]], align 4
2300 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
2301 // CHECK1: .omp.final.done:
2302 // CHECK1-NEXT: br label [[OMP_PRECOND_END]]
2303 // CHECK1: omp.precond.end:
2304 // CHECK1-NEXT: ret void
2305 //
2306 //
2307 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..19
2308 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
2309 // CHECK1-NEXT: entry:
2310 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2311 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2312 // CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
2313 // CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
2314 // CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
2315 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca double**, align 8
2316 // CHECK1-NEXT: [[B_ADDR:%.*]] = alloca double**, align 8
2317 // CHECK1-NEXT: [[C_ADDR:%.*]] = alloca double**, align 8
2318 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
2319 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
2320 // CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
2321 // CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
2322 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
2323 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
2324 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
2325 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2326 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2327 // CHECK1-NEXT: [[I4:%.*]] = alloca i32, align 4
2328 // CHECK1-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON_5:%.*]], align 8
2329 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2330 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2331 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
2332 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
2333 // CHECK1-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
2334 // CHECK1-NEXT: store double** [[A]], double*** [[A_ADDR]], align 8
2335 // CHECK1-NEXT: store double** [[B]], double*** [[B_ADDR]], align 8
2336 // CHECK1-NEXT: store double** [[C]], double*** [[C_ADDR]], align 8
2337 // CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
2338 // CHECK1-NEXT: [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
2339 // CHECK1-NEXT: [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
2340 // CHECK1-NEXT: [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
2341 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
2342 // CHECK1-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
2343 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2344 // CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
2345 // CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
2346 // CHECK1-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
2347 // CHECK1-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
2348 // CHECK1-NEXT: store i32 0, i32* [[I]], align 4
2349 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2350 // CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
2351 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
2352 // CHECK1: omp.precond.then:
2353 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
2354 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2355 // CHECK1-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
2356 // CHECK1-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
2357 // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
2358 // CHECK1-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
2359 // CHECK1-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
2360 // CHECK1-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
2361 // CHECK1-NEXT: store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
2362 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2363 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2364 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2365 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2366 // CHECK1-NEXT: [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2367 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
2368 // CHECK1-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32 35, i32 [[TMP10]], i32 [[TMP11]], i32 1, i32 1)
2369 // CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
2370 // CHECK1: omp.dispatch.cond:
2371 // CHECK1-NEXT: [[TMP14:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2372 // CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4
2373 // CHECK1-NEXT: [[TMP16:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP15]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
2374 // CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP16]], 0
2375 // CHECK1-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
2376 // CHECK1: omp.dispatch.body:
2377 // CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2378 // CHECK1-NEXT: store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
2379 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
2380 // CHECK1: omp.inner.for.cond:
2381 // CHECK1-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !46
2382 // CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !46
2383 // CHECK1-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
2384 // CHECK1-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2385 // CHECK1: omp.inner.for.body:
2386 // CHECK1-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !46
2387 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP20]], 1
2388 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2389 // CHECK1-NEXT: store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !46
2390 // CHECK1-NEXT: [[TMP21:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !46
2391 // CHECK1-NEXT: [[TMP22:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !46
2392 // CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP22]] to i64
2393 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP21]], i64 [[IDXPROM]]
2394 // CHECK1-NEXT: [[TMP23:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !46
2395 // CHECK1-NEXT: [[TMP24:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !46
2396 // CHECK1-NEXT: [[TMP25:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !46
2397 // CHECK1-NEXT: [[IDXPROM6:%.*]] = sext i32 [[TMP25]] to i64
2398 // CHECK1-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds double, double* [[TMP24]], i64 [[IDXPROM6]]
2399 // CHECK1-NEXT: [[TMP26:%.*]] = load double, double* [[ARRAYIDX7]], align 8, !llvm.access.group !46
2400 // CHECK1-NEXT: [[ADD8:%.*]] = fadd double [[TMP23]], [[TMP26]]
2401 // CHECK1-NEXT: [[TMP27:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !46
2402 // CHECK1-NEXT: [[TMP28:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !46
2403 // CHECK1-NEXT: [[IDXPROM9:%.*]] = sext i32 [[TMP28]] to i64
2404 // CHECK1-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds double, double* [[TMP27]], i64 [[IDXPROM9]]
2405 // CHECK1-NEXT: store double [[ADD8]], double* [[ARRAYIDX10]], align 8, !llvm.access.group !46
2406 // CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_5]], %class.anon.5* [[REF_TMP]], i32 0, i32 0
2407 // CHECK1-NEXT: store double** [[TMP1]], double*** [[TMP29]], align 8, !llvm.access.group !46
2408 // CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_5]], %class.anon.5* [[REF_TMP]], i32 0, i32 1
2409 // CHECK1-NEXT: store i32* [[I4]], i32** [[TMP30]], align 8, !llvm.access.group !46
2410 // CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_5]], %class.anon.5* [[REF_TMP]], i32 0, i32 2
2411 // CHECK1-NEXT: store double** [[TMP2]], double*** [[TMP31]], align 8, !llvm.access.group !46
2412 // CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[CLASS_ANON_5]], %class.anon.5* [[REF_TMP]], i32 0, i32 3
2413 // CHECK1-NEXT: store double** [[TMP3]], double*** [[TMP32]], align 8, !llvm.access.group !46
2414 // CHECK1-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE4_clEv"(%class.anon.5* noundef nonnull align 8 dereferenceable(32) [[REF_TMP]]), !llvm.access.group !46
2415 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
2416 // CHECK1: omp.body.continue:
2417 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
2418 // CHECK1: omp.inner.for.inc:
2419 // CHECK1-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !46
2420 // CHECK1-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP33]], 1
2421 // CHECK1-NEXT: store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !46
2422 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP47:![0-9]+]]
2423 // CHECK1: omp.inner.for.end:
2424 // CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
2425 // CHECK1: omp.dispatch.inc:
2426 // CHECK1-NEXT: br label [[OMP_DISPATCH_COND]]
2427 // CHECK1: omp.dispatch.end:
2428 // CHECK1-NEXT: [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2429 // CHECK1-NEXT: [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
2430 // CHECK1-NEXT: br i1 [[TMP35]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2431 // CHECK1: .omp.final.then:
2432 // CHECK1-NEXT: [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2433 // CHECK1-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP36]], 0
2434 // CHECK1-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1
2435 // CHECK1-NEXT: [[MUL14:%.*]] = mul nsw i32 [[DIV13]], 1
2436 // CHECK1-NEXT: [[ADD15:%.*]] = add nsw i32 0, [[MUL14]]
2437 // CHECK1-NEXT: store i32 [[ADD15]], i32* [[I4]], align 4
2438 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
2439 // CHECK1: .omp.final.done:
2440 // CHECK1-NEXT: br label [[OMP_PRECOND_END]]
2441 // CHECK1: omp.precond.end:
2442 // CHECK1-NEXT: ret void
2443 //
2444 //
2445 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l328
2446 // CHECK1-SAME: (i64 noundef [[CH:%.*]], i64 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR2]] {
2447 // CHECK1-NEXT: entry:
2448 // CHECK1-NEXT: [[CH_ADDR:%.*]] = alloca i64, align 8
2449 // CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
2450 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca double*, align 8
2451 // CHECK1-NEXT: [[B_ADDR:%.*]] = alloca double*, align 8
2452 // CHECK1-NEXT: [[C_ADDR:%.*]] = alloca double*, align 8
2453 // CHECK1-NEXT: store i64 [[CH]], i64* [[CH_ADDR]], align 8
2454 // CHECK1-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
2455 // CHECK1-NEXT: store double* [[A]], double** [[A_ADDR]], align 8
2456 // CHECK1-NEXT: store double* [[B]], double** [[B_ADDR]], align 8
2457 // CHECK1-NEXT: store double* [[C]], double** [[C_ADDR]], align 8
2458 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
2459 // CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
2460 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
2461 // CHECK1-NEXT: ret void
2462 //
2463 //
2464 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..22
2465 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
2466 // CHECK1-NEXT: entry:
2467 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2468 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2469 // CHECK1-NEXT: [[CH_ADDR:%.*]] = alloca i32*, align 8
2470 // CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
2471 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca double**, align 8
2472 // CHECK1-NEXT: [[B_ADDR:%.*]] = alloca double**, align 8
2473 // CHECK1-NEXT: [[C_ADDR:%.*]] = alloca double**, align 8
2474 // CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
2475 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
2476 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
2477 // CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
2478 // CHECK1-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
2479 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
2480 // CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
2481 // CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
2482 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2483 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2484 // CHECK1-NEXT: [[I4:%.*]] = alloca i32, align 4
2485 // CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
2486 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2487 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2488 // CHECK1-NEXT: store i32* [[CH]], i32** [[CH_ADDR]], align 8
2489 // CHECK1-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
2490 // CHECK1-NEXT: store double** [[A]], double*** [[A_ADDR]], align 8
2491 // CHECK1-NEXT: store double** [[B]], double*** [[B_ADDR]], align 8
2492 // CHECK1-NEXT: store double** [[C]], double*** [[C_ADDR]], align 8
2493 // CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
2494 // CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
2495 // CHECK1-NEXT: [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 8
2496 // CHECK1-NEXT: [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 8
2497 // CHECK1-NEXT: [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 8
2498 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
2499 // CHECK1-NEXT: store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
2500 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
2501 // CHECK1-NEXT: store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
2502 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2503 // CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
2504 // CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
2505 // CHECK1-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
2506 // CHECK1-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
2507 // CHECK1-NEXT: store i32 0, i32* [[I]], align 4
2508 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2509 // CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
2510 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
2511 // CHECK1: omp.precond.then:
2512 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
2513 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
2514 // CHECK1-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
2515 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2516 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2517 // CHECK1-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2518 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
2519 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2520 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2521 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
2522 // CHECK1-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
2523 // CHECK1-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2524 // CHECK1: cond.true:
2525 // CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
2526 // CHECK1-NEXT: br label [[COND_END:%.*]]
2527 // CHECK1: cond.false:
2528 // CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2529 // CHECK1-NEXT: br label [[COND_END]]
2530 // CHECK1: cond.end:
2531 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
2532 // CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
2533 // CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
2534 // CHECK1-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
2535 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
2536 // CHECK1: omp.inner.for.cond:
2537 // CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !49
2538 // CHECK1-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !49
2539 // CHECK1-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
2540 // CHECK1-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2541 // CHECK1: omp.inner.for.body:
2542 // CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !49
2543 // CHECK1-NEXT: [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
2544 // CHECK1-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !49
2545 // CHECK1-NEXT: [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
2546 // CHECK1-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !49
2547 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
2548 // CHECK1-NEXT: store i32 [[TMP23]], i32* [[CONV]], align 4, !llvm.access.group !49
2549 // CHECK1-NEXT: [[TMP24:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !49
2550 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**, i64)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]], i64 [[TMP24]]), !llvm.access.group !49
2551 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
2552 // CHECK1: omp.inner.for.inc:
2553 // CHECK1-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !49
2554 // CHECK1-NEXT: [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !49
2555 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
2556 // CHECK1-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !49
2557 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP50:![0-9]+]]
2558 // CHECK1: omp.inner.for.end:
2559 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
2560 // CHECK1: omp.loop.exit:
2561 // CHECK1-NEXT: [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2562 // CHECK1-NEXT: [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
2563 // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP28]])
2564 // CHECK1-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2565 // CHECK1-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
2566 // CHECK1-NEXT: br i1 [[TMP30]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2567 // CHECK1: .omp.final.then:
2568 // CHECK1-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2569 // CHECK1-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP31]], 0
2570 // CHECK1-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
2571 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
2572 // CHECK1-NEXT: [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
2573 // CHECK1-NEXT: store i32 [[ADD9]], i32* [[I4]], align 4
2574 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
2575 // CHECK1: .omp.final.done:
2576 // CHECK1-NEXT: br label [[OMP_PRECOND_END]]
2577 // CHECK1: omp.precond.end:
2578 // CHECK1-NEXT: ret void
2579 //
2580 //
2581 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..23
2582 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
2583 // CHECK1-NEXT: entry:
2584 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2585 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2586 // CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
2587 // CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
2588 // CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
2589 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca double**, align 8
2590 // CHECK1-NEXT: [[B_ADDR:%.*]] = alloca double**, align 8
2591 // CHECK1-NEXT: [[C_ADDR:%.*]] = alloca double**, align 8
2592 // CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
2593 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
2594 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
2595 // CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
2596 // CHECK1-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
2597 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
2598 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
2599 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
2600 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2601 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2602 // CHECK1-NEXT: [[I6:%.*]] = alloca i32, align 4
2603 // CHECK1-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON_6:%.*]], align 8
2604 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2605 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2606 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
2607 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
2608 // CHECK1-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
2609 // CHECK1-NEXT: store double** [[A]], double*** [[A_ADDR]], align 8
2610 // CHECK1-NEXT: store double** [[B]], double*** [[B_ADDR]], align 8
2611 // CHECK1-NEXT: store double** [[C]], double*** [[C_ADDR]], align 8
2612 // CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
2613 // CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
2614 // CHECK1-NEXT: [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
2615 // CHECK1-NEXT: [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
2616 // CHECK1-NEXT: [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
2617 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
2618 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
2619 // CHECK1-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
2620 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2621 // CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
2622 // CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
2623 // CHECK1-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
2624 // CHECK1-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
2625 // CHECK1-NEXT: store i32 0, i32* [[I]], align 4
2626 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2627 // CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
2628 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
2629 // CHECK1: omp.precond.then:
2630 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
2631 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
2632 // CHECK1-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
2633 // CHECK1-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
2634 // CHECK1-NEXT: [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
2635 // CHECK1-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
2636 // CHECK1-NEXT: [[CONV5:%.*]] = trunc i64 [[TMP9]] to i32
2637 // CHECK1-NEXT: store i32 [[CONV4]], i32* [[DOTOMP_LB]], align 4
2638 // CHECK1-NEXT: store i32 [[CONV5]], i32* [[DOTOMP_UB]], align 4
2639 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2640 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2641 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[CONV]], align 4
2642 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2643 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2644 // CHECK1-NEXT: [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2645 // CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
2646 // CHECK1-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32 35, i32 [[TMP11]], i32 [[TMP12]], i32 1, i32 [[TMP10]])
2647 // CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
2648 // CHECK1: omp.dispatch.cond:
2649 // CHECK1-NEXT: [[TMP15:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2650 // CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4
2651 // CHECK1-NEXT: [[TMP17:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP16]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
2652 // CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP17]], 0
2653 // CHECK1-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
2654 // CHECK1: omp.dispatch.body:
2655 // CHECK1-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2656 // CHECK1-NEXT: store i32 [[TMP18]], i32* [[DOTOMP_IV]], align 4
2657 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
2658 // CHECK1: omp.inner.for.cond:
2659 // CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !52
2660 // CHECK1-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !52
2661 // CHECK1-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
2662 // CHECK1-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2663 // CHECK1: omp.inner.for.body:
2664 // CHECK1-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !52
2665 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP21]], 1
2666 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2667 // CHECK1-NEXT: store i32 [[ADD]], i32* [[I6]], align 4, !llvm.access.group !52
2668 // CHECK1-NEXT: [[TMP22:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !52
2669 // CHECK1-NEXT: [[TMP23:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !52
2670 // CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP23]] to i64
2671 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP22]], i64 [[IDXPROM]]
2672 // CHECK1-NEXT: [[TMP24:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !52
2673 // CHECK1-NEXT: [[TMP25:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !52
2674 // CHECK1-NEXT: [[TMP26:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !52
2675 // CHECK1-NEXT: [[IDXPROM8:%.*]] = sext i32 [[TMP26]] to i64
2676 // CHECK1-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds double, double* [[TMP25]], i64 [[IDXPROM8]]
2677 // CHECK1-NEXT: [[TMP27:%.*]] = load double, double* [[ARRAYIDX9]], align 8, !llvm.access.group !52
2678 // CHECK1-NEXT: [[ADD10:%.*]] = fadd double [[TMP24]], [[TMP27]]
2679 // CHECK1-NEXT: [[TMP28:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !52
2680 // CHECK1-NEXT: [[TMP29:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !52
2681 // CHECK1-NEXT: [[IDXPROM11:%.*]] = sext i32 [[TMP29]] to i64
2682 // CHECK1-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds double, double* [[TMP28]], i64 [[IDXPROM11]]
2683 // CHECK1-NEXT: store double [[ADD10]], double* [[ARRAYIDX12]], align 8, !llvm.access.group !52
2684 // CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_6]], %class.anon.6* [[REF_TMP]], i32 0, i32 0
2685 // CHECK1-NEXT: store double** [[TMP1]], double*** [[TMP30]], align 8, !llvm.access.group !52
2686 // CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_6]], %class.anon.6* [[REF_TMP]], i32 0, i32 1
2687 // CHECK1-NEXT: store i32* [[I6]], i32** [[TMP31]], align 8, !llvm.access.group !52
2688 // CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[CLASS_ANON_6]], %class.anon.6* [[REF_TMP]], i32 0, i32 2
2689 // CHECK1-NEXT: store double** [[TMP2]], double*** [[TMP32]], align 8, !llvm.access.group !52
2690 // CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[CLASS_ANON_6]], %class.anon.6* [[REF_TMP]], i32 0, i32 3
2691 // CHECK1-NEXT: store double** [[TMP3]], double*** [[TMP33]], align 8, !llvm.access.group !52
2692 // CHECK1-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE5_clEv"(%class.anon.6* noundef nonnull align 8 dereferenceable(32) [[REF_TMP]]), !llvm.access.group !52
2693 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
2694 // CHECK1: omp.body.continue:
2695 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
2696 // CHECK1: omp.inner.for.inc:
2697 // CHECK1-NEXT: [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !52
2698 // CHECK1-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP34]], 1
2699 // CHECK1-NEXT: store i32 [[ADD13]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !52
2700 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP53:![0-9]+]]
2701 // CHECK1: omp.inner.for.end:
2702 // CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
2703 // CHECK1: omp.dispatch.inc:
2704 // CHECK1-NEXT: br label [[OMP_DISPATCH_COND]]
2705 // CHECK1: omp.dispatch.end:
2706 // CHECK1-NEXT: [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2707 // CHECK1-NEXT: [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
2708 // CHECK1-NEXT: br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2709 // CHECK1: .omp.final.then:
2710 // CHECK1-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2711 // CHECK1-NEXT: [[SUB14:%.*]] = sub nsw i32 [[TMP37]], 0
2712 // CHECK1-NEXT: [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
2713 // CHECK1-NEXT: [[MUL16:%.*]] = mul nsw i32 [[DIV15]], 1
2714 // CHECK1-NEXT: [[ADD17:%.*]] = add nsw i32 0, [[MUL16]]
2715 // CHECK1-NEXT: store i32 [[ADD17]], i32* [[I6]], align 4
2716 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
2717 // CHECK1: .omp.final.done:
2718 // CHECK1-NEXT: br label [[OMP_PRECOND_END]]
2719 // CHECK1: omp.precond.end:
2720 // CHECK1-NEXT: ret void
2721 //
2722 //
2723 // CHECK1-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
2724 // CHECK1-SAME: () #[[ATTR4:[0-9]+]] {
2725 // CHECK1-NEXT: entry:
2726 // CHECK1-NEXT: call void @__tgt_register_requires(i64 1)
2727 // CHECK1-NEXT: ret void
2728 //
2729 //
2730 // CHECK3-LABEL: define {{[^@]+}}@main
2731 // CHECK3-SAME: () #[[ATTR0:[0-9]+]] {
2732 // CHECK3-NEXT: entry:
2733 // CHECK3-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
2734 // CHECK3-NEXT: [[A:%.*]] = alloca double*, align 4
2735 // CHECK3-NEXT: [[B:%.*]] = alloca double*, align 4
2736 // CHECK3-NEXT: [[C:%.*]] = alloca double*, align 4
2737 // CHECK3-NEXT: [[N:%.*]] = alloca i32, align 4
2738 // CHECK3-NEXT: [[CH:%.*]] = alloca i32, align 4
2739 // CHECK3-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 4
2740 // CHECK3-NEXT: store i32 0, i32* [[RETVAL]], align 4
2741 // CHECK3-NEXT: store i32 10000, i32* [[N]], align 4
2742 // CHECK3-NEXT: store i32 100, i32* [[CH]], align 4
2743 // CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 0
2744 // CHECK3-NEXT: store i32* [[N]], i32** [[TMP0]], align 4
2745 // CHECK3-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 1
2746 // CHECK3-NEXT: store double** [[A]], double*** [[TMP1]], align 4
2747 // CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 2
2748 // CHECK3-NEXT: store double** [[B]], double*** [[TMP2]], align 4
2749 // CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 3
2750 // CHECK3-NEXT: store double** [[C]], double*** [[TMP3]], align 4
2751 // CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 4
2752 // CHECK3-NEXT: store i32* [[CH]], i32** [[TMP4]], align 4
2753 // CHECK3-NEXT: call void @"_ZZ4mainENK3$_0clEv"(%class.anon* noundef nonnull align 4 dereferenceable(20) [[REF_TMP]])
2754 // CHECK3-NEXT: ret i32 0
2755 //
2756 //
2757 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l116
2758 // CHECK3-SAME: (i32 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR2:[0-9]+]] {
2759 // CHECK3-NEXT: entry:
2760 // CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
2761 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca double*, align 4
2762 // CHECK3-NEXT: [[B_ADDR:%.*]] = alloca double*, align 4
2763 // CHECK3-NEXT: [[C_ADDR:%.*]] = alloca double*, align 4
2764 // CHECK3-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
2765 // CHECK3-NEXT: store double* [[A]], double** [[A_ADDR]], align 4
2766 // CHECK3-NEXT: store double* [[B]], double** [[B_ADDR]], align 4
2767 // CHECK3-NEXT: store double* [[C]], double** [[C_ADDR]], align 4
2768 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
2769 // CHECK3-NEXT: ret void
2770 //
2771 //
2772 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined.
2773 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
2774 // CHECK3-NEXT: entry:
2775 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
2776 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
2777 // CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 4
2778 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca double**, align 4
2779 // CHECK3-NEXT: [[B_ADDR:%.*]] = alloca double**, align 4
2780 // CHECK3-NEXT: [[C_ADDR:%.*]] = alloca double**, align 4
2781 // CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
2782 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
2783 // CHECK3-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
2784 // CHECK3-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
2785 // CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
2786 // CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
2787 // CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
2788 // CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2789 // CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2790 // CHECK3-NEXT: [[I3:%.*]] = alloca i32, align 4
2791 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
2792 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
2793 // CHECK3-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 4
2794 // CHECK3-NEXT: store double** [[A]], double*** [[A_ADDR]], align 4
2795 // CHECK3-NEXT: store double** [[B]], double*** [[B_ADDR]], align 4
2796 // CHECK3-NEXT: store double** [[C]], double*** [[C_ADDR]], align 4
2797 // CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
2798 // CHECK3-NEXT: [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
2799 // CHECK3-NEXT: [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
2800 // CHECK3-NEXT: [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
2801 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
2802 // CHECK3-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
2803 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2804 // CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
2805 // CHECK3-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
2806 // CHECK3-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
2807 // CHECK3-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
2808 // CHECK3-NEXT: store i32 0, i32* [[I]], align 4
2809 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2810 // CHECK3-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
2811 // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
2812 // CHECK3: omp.precond.then:
2813 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
2814 // CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2815 // CHECK3-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
2816 // CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2817 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2818 // CHECK3-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
2819 // CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
2820 // CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2821 // CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2822 // CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2823 // CHECK3-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
2824 // CHECK3-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2825 // CHECK3: cond.true:
2826 // CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2827 // CHECK3-NEXT: br label [[COND_END:%.*]]
2828 // CHECK3: cond.false:
2829 // CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2830 // CHECK3-NEXT: br label [[COND_END]]
2831 // CHECK3: cond.end:
2832 // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
2833 // CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
2834 // CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
2835 // CHECK3-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
2836 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
2837 // CHECK3: omp.inner.for.cond:
2838 // CHECK3-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
2839 // CHECK3-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !11
2840 // CHECK3-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
2841 // CHECK3-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2842 // CHECK3: omp.inner.for.body:
2843 // CHECK3-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !11
2844 // CHECK3-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !11
2845 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !11
2846 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
2847 // CHECK3: omp.inner.for.inc:
2848 // CHECK3-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
2849 // CHECK3-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !11
2850 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
2851 // CHECK3-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
2852 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]]
2853 // CHECK3: omp.inner.for.end:
2854 // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
2855 // CHECK3: omp.loop.exit:
2856 // CHECK3-NEXT: [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
2857 // CHECK3-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
2858 // CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
2859 // CHECK3-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2860 // CHECK3-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
2861 // CHECK3-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2862 // CHECK3: .omp.final.then:
2863 // CHECK3-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2864 // CHECK3-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
2865 // CHECK3-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
2866 // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
2867 // CHECK3-NEXT: [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
2868 // CHECK3-NEXT: store i32 [[ADD8]], i32* [[I3]], align 4
2869 // CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]]
2870 // CHECK3: .omp.final.done:
2871 // CHECK3-NEXT: br label [[OMP_PRECOND_END]]
2872 // CHECK3: omp.precond.end:
2873 // CHECK3-NEXT: ret void
2874 //
2875 //
2876 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..1
2877 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
2878 // CHECK3-NEXT: entry:
2879 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
2880 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
2881 // CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
2882 // CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
2883 // CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 4
2884 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca double**, align 4
2885 // CHECK3-NEXT: [[B_ADDR:%.*]] = alloca double**, align 4
2886 // CHECK3-NEXT: [[C_ADDR:%.*]] = alloca double**, align 4
2887 // CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
2888 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
2889 // CHECK3-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
2890 // CHECK3-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
2891 // CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
2892 // CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
2893 // CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
2894 // CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2895 // CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2896 // CHECK3-NEXT: [[I3:%.*]] = alloca i32, align 4
2897 // CHECK3-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 4
2898 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
2899 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
2900 // CHECK3-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
2901 // CHECK3-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
2902 // CHECK3-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 4
2903 // CHECK3-NEXT: store double** [[A]], double*** [[A_ADDR]], align 4
2904 // CHECK3-NEXT: store double** [[B]], double*** [[B_ADDR]], align 4
2905 // CHECK3-NEXT: store double** [[C]], double*** [[C_ADDR]], align 4
2906 // CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
2907 // CHECK3-NEXT: [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
2908 // CHECK3-NEXT: [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
2909 // CHECK3-NEXT: [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
2910 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
2911 // CHECK3-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
2912 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2913 // CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
2914 // CHECK3-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
2915 // CHECK3-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
2916 // CHECK3-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
2917 // CHECK3-NEXT: store i32 0, i32* [[I]], align 4
2918 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2919 // CHECK3-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
2920 // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
2921 // CHECK3: omp.precond.then:
2922 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
2923 // CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2924 // CHECK3-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
2925 // CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
2926 // CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
2927 // CHECK3-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
2928 // CHECK3-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
2929 // CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2930 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2931 // CHECK3-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
2932 // CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
2933 // CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2934 // CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2935 // CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2936 // CHECK3-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
2937 // CHECK3-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2938 // CHECK3: cond.true:
2939 // CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2940 // CHECK3-NEXT: br label [[COND_END:%.*]]
2941 // CHECK3: cond.false:
2942 // CHECK3-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2943 // CHECK3-NEXT: br label [[COND_END]]
2944 // CHECK3: cond.end:
2945 // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
2946 // CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
2947 // CHECK3-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2948 // CHECK3-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
2949 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
2950 // CHECK3: omp.inner.for.cond:
2951 // CHECK3-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15
2952 // CHECK3-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !15
2953 // CHECK3-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
2954 // CHECK3-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2955 // CHECK3: omp.inner.for.body:
2956 // CHECK3-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15
2957 // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
2958 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2959 // CHECK3-NEXT: store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !15
2960 // CHECK3-NEXT: [[TMP20:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !15
2961 // CHECK3-NEXT: [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !15
2962 // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i32 [[TMP21]]
2963 // CHECK3-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !15
2964 // CHECK3-NEXT: [[TMP23:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !15
2965 // CHECK3-NEXT: [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !15
2966 // CHECK3-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
2967 // CHECK3-NEXT: [[TMP25:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !15
2968 // CHECK3-NEXT: [[ADD7:%.*]] = fadd double [[TMP22]], [[TMP25]]
2969 // CHECK3-NEXT: [[TMP26:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !15
2970 // CHECK3-NEXT: [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !15
2971 // CHECK3-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
2972 // CHECK3-NEXT: store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !15
2973 // CHECK3-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0
2974 // CHECK3-NEXT: store double** [[TMP1]], double*** [[TMP28]], align 4, !llvm.access.group !15
2975 // CHECK3-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 1
2976 // CHECK3-NEXT: store i32* [[I3]], i32** [[TMP29]], align 4, !llvm.access.group !15
2977 // CHECK3-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 2
2978 // CHECK3-NEXT: store double** [[TMP2]], double*** [[TMP30]], align 4, !llvm.access.group !15
2979 // CHECK3-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 3
2980 // CHECK3-NEXT: store double** [[TMP3]], double*** [[TMP31]], align 4, !llvm.access.group !15
2981 // CHECK3-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(%class.anon.0* noundef nonnull align 4 dereferenceable(16) [[REF_TMP]]), !llvm.access.group !15
2982 // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
2983 // CHECK3: omp.body.continue:
2984 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
2985 // CHECK3: omp.inner.for.inc:
2986 // CHECK3-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15
2987 // CHECK3-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP32]], 1
2988 // CHECK3-NEXT: store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15
2989 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]]
2990 // CHECK3: omp.inner.for.end:
2991 // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
2992 // CHECK3: omp.loop.exit:
2993 // CHECK3-NEXT: [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
2994 // CHECK3-NEXT: [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
2995 // CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
2996 // CHECK3-NEXT: [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2997 // CHECK3-NEXT: [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
2998 // CHECK3-NEXT: br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2999 // CHECK3: .omp.final.then:
3000 // CHECK3-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3001 // CHECK3-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP37]], 0
3002 // CHECK3-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
3003 // CHECK3-NEXT: [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
3004 // CHECK3-NEXT: [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
3005 // CHECK3-NEXT: store i32 [[ADD13]], i32* [[I3]], align 4
3006 // CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]]
3007 // CHECK3: .omp.final.done:
3008 // CHECK3-NEXT: br label [[OMP_PRECOND_END]]
3009 // CHECK3: omp.precond.end:
3010 // CHECK3-NEXT: ret void
3011 //
3012 //
3013 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159
3014 // CHECK3-SAME: (i32 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR2]] {
3015 // CHECK3-NEXT: entry:
3016 // CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
3017 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca double*, align 4
3018 // CHECK3-NEXT: [[B_ADDR:%.*]] = alloca double*, align 4
3019 // CHECK3-NEXT: [[C_ADDR:%.*]] = alloca double*, align 4
3020 // CHECK3-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
3021 // CHECK3-NEXT: store double* [[A]], double** [[A_ADDR]], align 4
3022 // CHECK3-NEXT: store double* [[B]], double** [[B_ADDR]], align 4
3023 // CHECK3-NEXT: store double* [[C]], double** [[C_ADDR]], align 4
3024 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
3025 // CHECK3-NEXT: ret void
3026 //
3027 //
3028 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..2
3029 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
3030 // CHECK3-NEXT: entry:
3031 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
3032 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
3033 // CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 4
3034 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca double**, align 4
3035 // CHECK3-NEXT: [[B_ADDR:%.*]] = alloca double**, align 4
3036 // CHECK3-NEXT: [[C_ADDR:%.*]] = alloca double**, align 4
3037 // CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
3038 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
3039 // CHECK3-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
3040 // CHECK3-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
3041 // CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
3042 // CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
3043 // CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
3044 // CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3045 // CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3046 // CHECK3-NEXT: [[I3:%.*]] = alloca i32, align 4
3047 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
3048 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
3049 // CHECK3-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 4
3050 // CHECK3-NEXT: store double** [[A]], double*** [[A_ADDR]], align 4
3051 // CHECK3-NEXT: store double** [[B]], double*** [[B_ADDR]], align 4
3052 // CHECK3-NEXT: store double** [[C]], double*** [[C_ADDR]], align 4
3053 // CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
3054 // CHECK3-NEXT: [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
3055 // CHECK3-NEXT: [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
3056 // CHECK3-NEXT: [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
3057 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
3058 // CHECK3-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
3059 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3060 // CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
3061 // CHECK3-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
3062 // CHECK3-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
3063 // CHECK3-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
3064 // CHECK3-NEXT: store i32 0, i32* [[I]], align 4
3065 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3066 // CHECK3-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
3067 // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
3068 // CHECK3: omp.precond.then:
3069 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
3070 // CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3071 // CHECK3-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
3072 // CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3073 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3074 // CHECK3-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3075 // CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
3076 // CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
3077 // CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3078 // CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3079 // CHECK3-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
3080 // CHECK3-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3081 // CHECK3: cond.true:
3082 // CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3083 // CHECK3-NEXT: br label [[COND_END:%.*]]
3084 // CHECK3: cond.false:
3085 // CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3086 // CHECK3-NEXT: br label [[COND_END]]
3087 // CHECK3: cond.end:
3088 // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
3089 // CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
3090 // CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
3091 // CHECK3-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
3092 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
3093 // CHECK3: omp.inner.for.cond:
3094 // CHECK3-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
3095 // CHECK3-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !20
3096 // CHECK3-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
3097 // CHECK3-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3098 // CHECK3: omp.inner.for.body:
3099 // CHECK3-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !20
3100 // CHECK3-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !20
3101 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !20
3102 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
3103 // CHECK3: omp.inner.for.inc:
3104 // CHECK3-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
3105 // CHECK3-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !20
3106 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
3107 // CHECK3-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
3108 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]]
3109 // CHECK3: omp.inner.for.end:
3110 // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
3111 // CHECK3: omp.loop.exit:
3112 // CHECK3-NEXT: [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3113 // CHECK3-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
3114 // CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
3115 // CHECK3-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3116 // CHECK3-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
3117 // CHECK3-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3118 // CHECK3: .omp.final.then:
3119 // CHECK3-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3120 // CHECK3-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
3121 // CHECK3-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
3122 // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
3123 // CHECK3-NEXT: [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
3124 // CHECK3-NEXT: store i32 [[ADD8]], i32* [[I3]], align 4
3125 // CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]]
3126 // CHECK3: .omp.final.done:
3127 // CHECK3-NEXT: br label [[OMP_PRECOND_END]]
3128 // CHECK3: omp.precond.end:
3129 // CHECK3-NEXT: ret void
3130 //
3131 //
3132 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..3
3133 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
3134 // CHECK3-NEXT: entry:
3135 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
3136 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
3137 // CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
3138 // CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
3139 // CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 4
3140 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca double**, align 4
3141 // CHECK3-NEXT: [[B_ADDR:%.*]] = alloca double**, align 4
3142 // CHECK3-NEXT: [[C_ADDR:%.*]] = alloca double**, align 4
3143 // CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
3144 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
3145 // CHECK3-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
3146 // CHECK3-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
3147 // CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
3148 // CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
3149 // CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
3150 // CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3151 // CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3152 // CHECK3-NEXT: [[I3:%.*]] = alloca i32, align 4
3153 // CHECK3-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON_1:%.*]], align 4
3154 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
3155 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
3156 // CHECK3-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
3157 // CHECK3-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
3158 // CHECK3-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 4
3159 // CHECK3-NEXT: store double** [[A]], double*** [[A_ADDR]], align 4
3160 // CHECK3-NEXT: store double** [[B]], double*** [[B_ADDR]], align 4
3161 // CHECK3-NEXT: store double** [[C]], double*** [[C_ADDR]], align 4
3162 // CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
3163 // CHECK3-NEXT: [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
3164 // CHECK3-NEXT: [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
3165 // CHECK3-NEXT: [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
3166 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
3167 // CHECK3-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
3168 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3169 // CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
3170 // CHECK3-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
3171 // CHECK3-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
3172 // CHECK3-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
3173 // CHECK3-NEXT: store i32 0, i32* [[I]], align 4
3174 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3175 // CHECK3-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
3176 // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
3177 // CHECK3: omp.precond.then:
3178 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
3179 // CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3180 // CHECK3-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
3181 // CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
3182 // CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
3183 // CHECK3-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
3184 // CHECK3-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
3185 // CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3186 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3187 // CHECK3-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3188 // CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
3189 // CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
3190 // CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3191 // CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3192 // CHECK3-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
3193 // CHECK3-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3194 // CHECK3: cond.true:
3195 // CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3196 // CHECK3-NEXT: br label [[COND_END:%.*]]
3197 // CHECK3: cond.false:
3198 // CHECK3-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3199 // CHECK3-NEXT: br label [[COND_END]]
3200 // CHECK3: cond.end:
3201 // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
3202 // CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
3203 // CHECK3-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
3204 // CHECK3-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
3205 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
3206 // CHECK3: omp.inner.for.cond:
3207 // CHECK3-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
3208 // CHECK3-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !23
3209 // CHECK3-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
3210 // CHECK3-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3211 // CHECK3: omp.inner.for.body:
3212 // CHECK3-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
3213 // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
3214 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
3215 // CHECK3-NEXT: store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !23
3216 // CHECK3-NEXT: [[TMP20:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !23
3217 // CHECK3-NEXT: [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !23
3218 // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i32 [[TMP21]]
3219 // CHECK3-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !23
3220 // CHECK3-NEXT: [[TMP23:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !23
3221 // CHECK3-NEXT: [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !23
3222 // CHECK3-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
3223 // CHECK3-NEXT: [[TMP25:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !23
3224 // CHECK3-NEXT: [[ADD7:%.*]] = fadd double [[TMP22]], [[TMP25]]
3225 // CHECK3-NEXT: [[TMP26:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !23
3226 // CHECK3-NEXT: [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !23
3227 // CHECK3-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
3228 // CHECK3-NEXT: store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !23
3229 // CHECK3-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 0
3230 // CHECK3-NEXT: store double** [[TMP1]], double*** [[TMP28]], align 4, !llvm.access.group !23
3231 // CHECK3-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 1
3232 // CHECK3-NEXT: store i32* [[I3]], i32** [[TMP29]], align 4, !llvm.access.group !23
3233 // CHECK3-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 2
3234 // CHECK3-NEXT: store double** [[TMP2]], double*** [[TMP30]], align 4, !llvm.access.group !23
3235 // CHECK3-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 3
3236 // CHECK3-NEXT: store double** [[TMP3]], double*** [[TMP31]], align 4, !llvm.access.group !23
3237 // CHECK3-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE0_clEv"(%class.anon.1* noundef nonnull align 4 dereferenceable(16) [[REF_TMP]]), !llvm.access.group !23
3238 // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
3239 // CHECK3: omp.body.continue:
3240 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
3241 // CHECK3: omp.inner.for.inc:
3242 // CHECK3-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
3243 // CHECK3-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP32]], 1
3244 // CHECK3-NEXT: store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
3245 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]]
3246 // CHECK3: omp.inner.for.end:
3247 // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
3248 // CHECK3: omp.loop.exit:
3249 // CHECK3-NEXT: [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3250 // CHECK3-NEXT: [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
3251 // CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
3252 // CHECK3-NEXT: [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3253 // CHECK3-NEXT: [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
3254 // CHECK3-NEXT: br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3255 // CHECK3: .omp.final.then:
3256 // CHECK3-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3257 // CHECK3-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP37]], 0
3258 // CHECK3-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
3259 // CHECK3-NEXT: [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
3260 // CHECK3-NEXT: [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
3261 // CHECK3-NEXT: store i32 [[ADD13]], i32* [[I3]], align 4
3262 // CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]]
3263 // CHECK3: .omp.final.done:
3264 // CHECK3-NEXT: br label [[OMP_PRECOND_END]]
3265 // CHECK3: omp.precond.end:
3266 // CHECK3-NEXT: ret void
3267 //
3268 //
3269 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l201
3270 // CHECK3-SAME: (i32 noundef [[CH:%.*]], i32 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR2]] {
3271 // CHECK3-NEXT: entry:
3272 // CHECK3-NEXT: [[CH_ADDR:%.*]] = alloca i32, align 4
3273 // CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
3274 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca double*, align 4
3275 // CHECK3-NEXT: [[B_ADDR:%.*]] = alloca double*, align 4
3276 // CHECK3-NEXT: [[C_ADDR:%.*]] = alloca double*, align 4
3277 // CHECK3-NEXT: store i32 [[CH]], i32* [[CH_ADDR]], align 4
3278 // CHECK3-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
3279 // CHECK3-NEXT: store double* [[A]], double** [[A_ADDR]], align 4
3280 // CHECK3-NEXT: store double* [[B]], double** [[B_ADDR]], align 4
3281 // CHECK3-NEXT: store double* [[C]], double** [[C_ADDR]], align 4
3282 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
3283 // CHECK3-NEXT: ret void
3284 //
3285 //
3286 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..6
3287 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
3288 // CHECK3-NEXT: entry:
3289 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
3290 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
3291 // CHECK3-NEXT: [[CH_ADDR:%.*]] = alloca i32*, align 4
3292 // CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 4
3293 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca double**, align 4
3294 // CHECK3-NEXT: [[B_ADDR:%.*]] = alloca double**, align 4
3295 // CHECK3-NEXT: [[C_ADDR:%.*]] = alloca double**, align 4
3296 // CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
3297 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
3298 // CHECK3-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
3299 // CHECK3-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
3300 // CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
3301 // CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
3302 // CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
3303 // CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3304 // CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3305 // CHECK3-NEXT: [[I3:%.*]] = alloca i32, align 4
3306 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
3307 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
3308 // CHECK3-NEXT: store i32* [[CH]], i32** [[CH_ADDR]], align 4
3309 // CHECK3-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 4
3310 // CHECK3-NEXT: store double** [[A]], double*** [[A_ADDR]], align 4
3311 // CHECK3-NEXT: store double** [[B]], double*** [[B_ADDR]], align 4
3312 // CHECK3-NEXT: store double** [[C]], double*** [[C_ADDR]], align 4
3313 // CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
3314 // CHECK3-NEXT: [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
3315 // CHECK3-NEXT: [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 4
3316 // CHECK3-NEXT: [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 4
3317 // CHECK3-NEXT: [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 4
3318 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP1]], align 4
3319 // CHECK3-NEXT: store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
3320 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3321 // CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP6]], 0
3322 // CHECK3-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
3323 // CHECK3-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
3324 // CHECK3-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
3325 // CHECK3-NEXT: store i32 0, i32* [[I]], align 4
3326 // CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3327 // CHECK3-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP7]]
3328 // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
3329 // CHECK3: omp.precond.then:
3330 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
3331 // CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3332 // CHECK3-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_COMB_UB]], align 4
3333 // CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3334 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3335 // CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP0]], align 4
3336 // CHECK3-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3337 // CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
3338 // CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP9]])
3339 // CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3340 // CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3341 // CHECK3-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
3342 // CHECK3-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3343 // CHECK3: cond.true:
3344 // CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3345 // CHECK3-NEXT: br label [[COND_END:%.*]]
3346 // CHECK3: cond.false:
3347 // CHECK3-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3348 // CHECK3-NEXT: br label [[COND_END]]
3349 // CHECK3: cond.end:
3350 // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
3351 // CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
3352 // CHECK3-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
3353 // CHECK3-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
3354 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
3355 // CHECK3: omp.inner.for.cond:
3356 // CHECK3-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
3357 // CHECK3-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !26
3358 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP18]], 1
3359 // CHECK3-NEXT: [[CMP5:%.*]] = icmp slt i32 [[TMP17]], [[ADD]]
3360 // CHECK3-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3361 // CHECK3: omp.inner.for.body:
3362 // CHECK3-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !26
3363 // CHECK3-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26
3364 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]]), !llvm.access.group !26
3365 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
3366 // CHECK3: omp.inner.for.inc:
3367 // CHECK3-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
3368 // CHECK3-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !26
3369 // CHECK3-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
3370 // CHECK3-NEXT: store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
3371 // CHECK3-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !26
3372 // CHECK3-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !26
3373 // CHECK3-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
3374 // CHECK3-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !26
3375 // CHECK3-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26
3376 // CHECK3-NEXT: [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !26
3377 // CHECK3-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
3378 // CHECK3-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26
3379 // CHECK3-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26
3380 // CHECK3-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !26
3381 // CHECK3-NEXT: [[CMP9:%.*]] = icmp sgt i32 [[TMP27]], [[TMP28]]
3382 // CHECK3-NEXT: br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
3383 // CHECK3: cond.true10:
3384 // CHECK3-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !26
3385 // CHECK3-NEXT: br label [[COND_END12:%.*]]
3386 // CHECK3: cond.false11:
3387 // CHECK3-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26
3388 // CHECK3-NEXT: br label [[COND_END12]]
3389 // CHECK3: cond.end12:
3390 // CHECK3-NEXT: [[COND13:%.*]] = phi i32 [ [[TMP29]], [[COND_TRUE10]] ], [ [[TMP30]], [[COND_FALSE11]] ]
3391 // CHECK3-NEXT: store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26
3392 // CHECK3-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !26
3393 // CHECK3-NEXT: store i32 [[TMP31]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
3394 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]]
3395 // CHECK3: omp.inner.for.end:
3396 // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
3397 // CHECK3: omp.loop.exit:
3398 // CHECK3-NEXT: [[TMP32:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3399 // CHECK3-NEXT: [[TMP33:%.*]] = load i32, i32* [[TMP32]], align 4
3400 // CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP33]])
3401 // CHECK3-NEXT: [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3402 // CHECK3-NEXT: [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
3403 // CHECK3-NEXT: br i1 [[TMP35]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3404 // CHECK3: .omp.final.then:
3405 // CHECK3-NEXT: [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3406 // CHECK3-NEXT: [[SUB14:%.*]] = sub nsw i32 [[TMP36]], 0
3407 // CHECK3-NEXT: [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
3408 // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV15]], 1
3409 // CHECK3-NEXT: [[ADD16:%.*]] = add nsw i32 0, [[MUL]]
3410 // CHECK3-NEXT: store i32 [[ADD16]], i32* [[I3]], align 4
3411 // CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]]
3412 // CHECK3: .omp.final.done:
3413 // CHECK3-NEXT: br label [[OMP_PRECOND_END]]
3414 // CHECK3: omp.precond.end:
3415 // CHECK3-NEXT: ret void
3416 //
3417 //
3418 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..7
3419 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
3420 // CHECK3-NEXT: entry:
3421 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
3422 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
3423 // CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
3424 // CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
3425 // CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 4
3426 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca double**, align 4
3427 // CHECK3-NEXT: [[B_ADDR:%.*]] = alloca double**, align 4
3428 // CHECK3-NEXT: [[C_ADDR:%.*]] = alloca double**, align 4
3429 // CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
3430 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
3431 // CHECK3-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
3432 // CHECK3-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
3433 // CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
3434 // CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
3435 // CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
3436 // CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3437 // CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3438 // CHECK3-NEXT: [[I3:%.*]] = alloca i32, align 4
3439 // CHECK3-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON_2:%.*]], align 4
3440 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
3441 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
3442 // CHECK3-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
3443 // CHECK3-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
3444 // CHECK3-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 4
3445 // CHECK3-NEXT: store double** [[A]], double*** [[A_ADDR]], align 4
3446 // CHECK3-NEXT: store double** [[B]], double*** [[B_ADDR]], align 4
3447 // CHECK3-NEXT: store double** [[C]], double*** [[C_ADDR]], align 4
3448 // CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
3449 // CHECK3-NEXT: [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
3450 // CHECK3-NEXT: [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
3451 // CHECK3-NEXT: [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
3452 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
3453 // CHECK3-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
3454 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3455 // CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
3456 // CHECK3-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
3457 // CHECK3-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
3458 // CHECK3-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
3459 // CHECK3-NEXT: store i32 0, i32* [[I]], align 4
3460 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3461 // CHECK3-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
3462 // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
3463 // CHECK3: omp.precond.then:
3464 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
3465 // CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3466 // CHECK3-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
3467 // CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
3468 // CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
3469 // CHECK3-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
3470 // CHECK3-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
3471 // CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3472 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3473 // CHECK3-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3474 // CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
3475 // CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
3476 // CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3477 // CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3478 // CHECK3-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
3479 // CHECK3-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3480 // CHECK3: cond.true:
3481 // CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3482 // CHECK3-NEXT: br label [[COND_END:%.*]]
3483 // CHECK3: cond.false:
3484 // CHECK3-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3485 // CHECK3-NEXT: br label [[COND_END]]
3486 // CHECK3: cond.end:
3487 // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
3488 // CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
3489 // CHECK3-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
3490 // CHECK3-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
3491 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
3492 // CHECK3: omp.inner.for.cond:
3493 // CHECK3-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
3494 // CHECK3-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !29
3495 // CHECK3-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
3496 // CHECK3-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3497 // CHECK3: omp.inner.for.body:
3498 // CHECK3-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
3499 // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
3500 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
3501 // CHECK3-NEXT: store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !29
3502 // CHECK3-NEXT: [[TMP20:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !29
3503 // CHECK3-NEXT: [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !29
3504 // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i32 [[TMP21]]
3505 // CHECK3-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !29
3506 // CHECK3-NEXT: [[TMP23:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !29
3507 // CHECK3-NEXT: [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !29
3508 // CHECK3-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
3509 // CHECK3-NEXT: [[TMP25:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !29
3510 // CHECK3-NEXT: [[ADD7:%.*]] = fadd double [[TMP22]], [[TMP25]]
3511 // CHECK3-NEXT: [[TMP26:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !29
3512 // CHECK3-NEXT: [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !29
3513 // CHECK3-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
3514 // CHECK3-NEXT: store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !29
3515 // CHECK3-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[CLASS_ANON_2]], %class.anon.2* [[REF_TMP]], i32 0, i32 0
3516 // CHECK3-NEXT: store double** [[TMP1]], double*** [[TMP28]], align 4, !llvm.access.group !29
3517 // CHECK3-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_2]], %class.anon.2* [[REF_TMP]], i32 0, i32 1
3518 // CHECK3-NEXT: store i32* [[I3]], i32** [[TMP29]], align 4, !llvm.access.group !29
3519 // CHECK3-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_2]], %class.anon.2* [[REF_TMP]], i32 0, i32 2
3520 // CHECK3-NEXT: store double** [[TMP2]], double*** [[TMP30]], align 4, !llvm.access.group !29
3521 // CHECK3-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_2]], %class.anon.2* [[REF_TMP]], i32 0, i32 3
3522 // CHECK3-NEXT: store double** [[TMP3]], double*** [[TMP31]], align 4, !llvm.access.group !29
3523 // CHECK3-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE1_clEv"(%class.anon.2* noundef nonnull align 4 dereferenceable(16) [[REF_TMP]]), !llvm.access.group !29
3524 // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
3525 // CHECK3: omp.body.continue:
3526 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
3527 // CHECK3: omp.inner.for.inc:
3528 // CHECK3-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
3529 // CHECK3-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP32]], 1
3530 // CHECK3-NEXT: store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
3531 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]]
3532 // CHECK3: omp.inner.for.end:
3533 // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
3534 // CHECK3: omp.loop.exit:
3535 // CHECK3-NEXT: [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3536 // CHECK3-NEXT: [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
3537 // CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
3538 // CHECK3-NEXT: [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3539 // CHECK3-NEXT: [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
3540 // CHECK3-NEXT: br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3541 // CHECK3: .omp.final.then:
3542 // CHECK3-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3543 // CHECK3-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP37]], 0
3544 // CHECK3-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
3545 // CHECK3-NEXT: [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
3546 // CHECK3-NEXT: [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
3547 // CHECK3-NEXT: store i32 [[ADD13]], i32* [[I3]], align 4
3548 // CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]]
3549 // CHECK3: .omp.final.done:
3550 // CHECK3-NEXT: br label [[OMP_PRECOND_END]]
3551 // CHECK3: omp.precond.end:
3552 // CHECK3-NEXT: ret void
3553 //
3554 //
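// A minimal sketch of the kind of construct the following main_l234 checks
// correspond to (an assumption for orientation; the test's actual source
// appears earlier in this file):
//
//   // hypothetical shape, not the literal test source
//   #pragma omp target teams distribute parallel for
//   for (int i = 0; i < n; ++i) {
//     a[i] = b[i] + c[i];
//     [&]() { a[i] = b[i] + c[i]; }(); // lambda capturing a, i, b, c
//   }
//
// The host entry below simply forwards n, a, b, and c to __kmpc_fork_teams.
// The teams-level @.omp_outlined..10 drives the distribute loop
// (__kmpc_for_static_init_4 with schedule id 92) and forks
// @.omp_outlined..11 for each chunk, which runs the parallel-for body under
// a static schedule (id 34), stores a[i] = b[i] + c[i], and then invokes the
// captured lambda.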
3555 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l234
3556 // CHECK3-SAME: (i32 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR2]] {
3557 // CHECK3-NEXT: entry:
3558 // CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
3559 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca double*, align 4
3560 // CHECK3-NEXT: [[B_ADDR:%.*]] = alloca double*, align 4
3561 // CHECK3-NEXT: [[C_ADDR:%.*]] = alloca double*, align 4
3562 // CHECK3-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
3563 // CHECK3-NEXT: store double* [[A]], double** [[A_ADDR]], align 4
3564 // CHECK3-NEXT: store double* [[B]], double** [[B_ADDR]], align 4
3565 // CHECK3-NEXT: store double* [[C]], double** [[C_ADDR]], align 4
3566 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
3567 // CHECK3-NEXT: ret void
3568 //
3569 //
3570 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..10
3571 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
3572 // CHECK3-NEXT: entry:
3573 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
3574 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
3575 // CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 4
3576 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca double**, align 4
3577 // CHECK3-NEXT: [[B_ADDR:%.*]] = alloca double**, align 4
3578 // CHECK3-NEXT: [[C_ADDR:%.*]] = alloca double**, align 4
3579 // CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
3580 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
3581 // CHECK3-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
3582 // CHECK3-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
3583 // CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
3584 // CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
3585 // CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
3586 // CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3587 // CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3588 // CHECK3-NEXT: [[I3:%.*]] = alloca i32, align 4
3589 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
3590 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
3591 // CHECK3-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 4
3592 // CHECK3-NEXT: store double** [[A]], double*** [[A_ADDR]], align 4
3593 // CHECK3-NEXT: store double** [[B]], double*** [[B_ADDR]], align 4
3594 // CHECK3-NEXT: store double** [[C]], double*** [[C_ADDR]], align 4
3595 // CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
3596 // CHECK3-NEXT: [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
3597 // CHECK3-NEXT: [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
3598 // CHECK3-NEXT: [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
3599 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
3600 // CHECK3-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
3601 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3602 // CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
3603 // CHECK3-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
3604 // CHECK3-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
3605 // CHECK3-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
3606 // CHECK3-NEXT: store i32 0, i32* [[I]], align 4
3607 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3608 // CHECK3-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
3609 // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
3610 // CHECK3: omp.precond.then:
3611 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
3612 // CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3613 // CHECK3-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
3614 // CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3615 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3616 // CHECK3-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3617 // CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
3618 // CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
3619 // CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3620 // CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3621 // CHECK3-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
3622 // CHECK3-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3623 // CHECK3: cond.true:
3624 // CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3625 // CHECK3-NEXT: br label [[COND_END:%.*]]
3626 // CHECK3: cond.false:
3627 // CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3628 // CHECK3-NEXT: br label [[COND_END]]
3629 // CHECK3: cond.end:
3630 // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
3631 // CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
3632 // CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
3633 // CHECK3-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
3634 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
3635 // CHECK3: omp.inner.for.cond:
3636 // CHECK3-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
3637 // CHECK3-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32
3638 // CHECK3-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
3639 // CHECK3-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3640 // CHECK3: omp.inner.for.body:
3641 // CHECK3-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !32
3642 // CHECK3-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32
3643 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !32
3644 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
3645 // CHECK3: omp.inner.for.inc:
3646 // CHECK3-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
3647 // CHECK3-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !32
3648 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
3649 // CHECK3-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
3650 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]]
3651 // CHECK3: omp.inner.for.end:
3652 // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
3653 // CHECK3: omp.loop.exit:
3654 // CHECK3-NEXT: [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3655 // CHECK3-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
3656 // CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
3657 // CHECK3-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3658 // CHECK3-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
3659 // CHECK3-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3660 // CHECK3: .omp.final.then:
3661 // CHECK3-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3662 // CHECK3-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
3663 // CHECK3-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
3664 // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
3665 // CHECK3-NEXT: [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
3666 // CHECK3-NEXT: store i32 [[ADD8]], i32* [[I3]], align 4
3667 // CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]]
3668 // CHECK3: .omp.final.done:
3669 // CHECK3-NEXT: br label [[OMP_PRECOND_END]]
3670 // CHECK3: omp.precond.end:
3671 // CHECK3-NEXT: ret void
3672 //
3673 //
3674 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..11
3675 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
3676 // CHECK3-NEXT: entry:
3677 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
3678 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
3679 // CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
3680 // CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
3681 // CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 4
3682 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca double**, align 4
3683 // CHECK3-NEXT: [[B_ADDR:%.*]] = alloca double**, align 4
3684 // CHECK3-NEXT: [[C_ADDR:%.*]] = alloca double**, align 4
3685 // CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
3686 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
3687 // CHECK3-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
3688 // CHECK3-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
3689 // CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
3690 // CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
3691 // CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
3692 // CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3693 // CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3694 // CHECK3-NEXT: [[I3:%.*]] = alloca i32, align 4
3695 // CHECK3-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON_3:%.*]], align 4
3696 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
3697 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
3698 // CHECK3-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
3699 // CHECK3-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
3700 // CHECK3-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 4
3701 // CHECK3-NEXT: store double** [[A]], double*** [[A_ADDR]], align 4
3702 // CHECK3-NEXT: store double** [[B]], double*** [[B_ADDR]], align 4
3703 // CHECK3-NEXT: store double** [[C]], double*** [[C_ADDR]], align 4
3704 // CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
3705 // CHECK3-NEXT: [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
3706 // CHECK3-NEXT: [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
3707 // CHECK3-NEXT: [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
3708 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
3709 // CHECK3-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
3710 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3711 // CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
3712 // CHECK3-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
3713 // CHECK3-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
3714 // CHECK3-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
3715 // CHECK3-NEXT: store i32 0, i32* [[I]], align 4
3716 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3717 // CHECK3-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
3718 // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
3719 // CHECK3: omp.precond.then:
3720 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
3721 // CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3722 // CHECK3-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
3723 // CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
3724 // CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
3725 // CHECK3-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
3726 // CHECK3-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
3727 // CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3728 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3729 // CHECK3-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3730 // CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
3731 // CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
3732 // CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3733 // CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3734 // CHECK3-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
3735 // CHECK3-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3736 // CHECK3: cond.true:
3737 // CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3738 // CHECK3-NEXT: br label [[COND_END:%.*]]
3739 // CHECK3: cond.false:
3740 // CHECK3-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3741 // CHECK3-NEXT: br label [[COND_END]]
3742 // CHECK3: cond.end:
3743 // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
3744 // CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
3745 // CHECK3-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
3746 // CHECK3-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
3747 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
3748 // CHECK3: omp.inner.for.cond:
3749 // CHECK3-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
3750 // CHECK3-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !35
3751 // CHECK3-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
3752 // CHECK3-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3753 // CHECK3: omp.inner.for.body:
3754 // CHECK3-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
3755 // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
3756 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
3757 // CHECK3-NEXT: store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !35
3758 // CHECK3-NEXT: [[TMP20:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !35
3759 // CHECK3-NEXT: [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !35
3760 // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i32 [[TMP21]]
3761 // CHECK3-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !35
3762 // CHECK3-NEXT: [[TMP23:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !35
3763 // CHECK3-NEXT: [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !35
3764 // CHECK3-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
3765 // CHECK3-NEXT: [[TMP25:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !35
3766 // CHECK3-NEXT: [[ADD7:%.*]] = fadd double [[TMP22]], [[TMP25]]
3767 // CHECK3-NEXT: [[TMP26:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !35
3768 // CHECK3-NEXT: [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !35
3769 // CHECK3-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
3770 // CHECK3-NEXT: store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !35
3771 // CHECK3-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[CLASS_ANON_3]], %class.anon.3* [[REF_TMP]], i32 0, i32 0
3772 // CHECK3-NEXT: store double** [[TMP1]], double*** [[TMP28]], align 4, !llvm.access.group !35
3773 // CHECK3-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_3]], %class.anon.3* [[REF_TMP]], i32 0, i32 1
3774 // CHECK3-NEXT: store i32* [[I3]], i32** [[TMP29]], align 4, !llvm.access.group !35
3775 // CHECK3-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_3]], %class.anon.3* [[REF_TMP]], i32 0, i32 2
3776 // CHECK3-NEXT: store double** [[TMP2]], double*** [[TMP30]], align 4, !llvm.access.group !35
3777 // CHECK3-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_3]], %class.anon.3* [[REF_TMP]], i32 0, i32 3
3778 // CHECK3-NEXT: store double** [[TMP3]], double*** [[TMP31]], align 4, !llvm.access.group !35
3779 // CHECK3-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE2_clEv"(%class.anon.3* noundef nonnull align 4 dereferenceable(16) [[REF_TMP]]), !llvm.access.group !35
3780 // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
3781 // CHECK3: omp.body.continue:
3782 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
3783 // CHECK3: omp.inner.for.inc:
3784 // CHECK3-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
3785 // CHECK3-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP32]], 1
3786 // CHECK3-NEXT: store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
3787 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]]
3788 // CHECK3: omp.inner.for.end:
3789 // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
3790 // CHECK3: omp.loop.exit:
3791 // CHECK3-NEXT: [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3792 // CHECK3-NEXT: [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
3793 // CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
3794 // CHECK3-NEXT: [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3795 // CHECK3-NEXT: [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
3796 // CHECK3-NEXT: br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3797 // CHECK3: .omp.final.then:
3798 // CHECK3-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3799 // CHECK3-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP37]], 0
3800 // CHECK3-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
3801 // CHECK3-NEXT: [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
3802 // CHECK3-NEXT: [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
3803 // CHECK3-NEXT: store i32 [[ADD13]], i32* [[I3]], align 4
3804 // CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]]
3805 // CHECK3: .omp.final.done:
3806 // CHECK3-NEXT: br label [[OMP_PRECOND_END]]
3807 // CHECK3: omp.precond.end:
3808 // CHECK3-NEXT: ret void
3809 //
3810 //
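// The next group (main_l266) adds a chunk parameter ch. A minimal sketch of
// the corresponding construct (an assumption; the actual source appears
// earlier in this file):
//
//   // hypothetical shape, not the literal test source
//   #pragma omp target teams distribute parallel for schedule(static, ch)
//   for (int i = 0; i < n; ++i)
//     a[i] = b[i] + c[i];
//
// The kernel forwards ch alongside n, a, b, and c; @.omp_outlined..14
// captures ch, runs the distribute loop (schedule id 92), and passes ch down
// to @.omp_outlined..15, whose __kmpc_for_static_init_4 call uses schedule
// id 33 with ch as the chunk size, iterating chunks via omp.dispatch.cond
// until the previous (distribute) upper bound is reached.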
3811 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l266
3812 // CHECK3-SAME: (i32 noundef [[CH:%.*]], i32 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR2]] {
3813 // CHECK3-NEXT: entry:
3814 // CHECK3-NEXT: [[CH_ADDR:%.*]] = alloca i32, align 4
3815 // CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
3816 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca double*, align 4
3817 // CHECK3-NEXT: [[B_ADDR:%.*]] = alloca double*, align 4
3818 // CHECK3-NEXT: [[C_ADDR:%.*]] = alloca double*, align 4
3819 // CHECK3-NEXT: store i32 [[CH]], i32* [[CH_ADDR]], align 4
3820 // CHECK3-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
3821 // CHECK3-NEXT: store double* [[A]], double** [[A_ADDR]], align 4
3822 // CHECK3-NEXT: store double* [[B]], double** [[B_ADDR]], align 4
3823 // CHECK3-NEXT: store double* [[C]], double** [[C_ADDR]], align 4
3824 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
3825 // CHECK3-NEXT: ret void
3826 //
3827 //
3828 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..14
3829 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
3830 // CHECK3-NEXT: entry:
3831 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
3832 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
3833 // CHECK3-NEXT: [[CH_ADDR:%.*]] = alloca i32*, align 4
3834 // CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 4
3835 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca double**, align 4
3836 // CHECK3-NEXT: [[B_ADDR:%.*]] = alloca double**, align 4
3837 // CHECK3-NEXT: [[C_ADDR:%.*]] = alloca double**, align 4
3838 // CHECK3-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
3839 // CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
3840 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
3841 // CHECK3-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
3842 // CHECK3-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
3843 // CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
3844 // CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
3845 // CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
3846 // CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3847 // CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3848 // CHECK3-NEXT: [[I4:%.*]] = alloca i32, align 4
3849 // CHECK3-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
3850 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
3851 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
3852 // CHECK3-NEXT: store i32* [[CH]], i32** [[CH_ADDR]], align 4
3853 // CHECK3-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 4
3854 // CHECK3-NEXT: store double** [[A]], double*** [[A_ADDR]], align 4
3855 // CHECK3-NEXT: store double** [[B]], double*** [[B_ADDR]], align 4
3856 // CHECK3-NEXT: store double** [[C]], double*** [[C_ADDR]], align 4
3857 // CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
3858 // CHECK3-NEXT: [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
3859 // CHECK3-NEXT: [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 4
3860 // CHECK3-NEXT: [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 4
3861 // CHECK3-NEXT: [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 4
3862 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
3863 // CHECK3-NEXT: store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
3864 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
3865 // CHECK3-NEXT: store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
3866 // CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3867 // CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
3868 // CHECK3-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
3869 // CHECK3-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
3870 // CHECK3-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
3871 // CHECK3-NEXT: store i32 0, i32* [[I]], align 4
3872 // CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3873 // CHECK3-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
3874 // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
3875 // CHECK3: omp.precond.then:
3876 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
3877 // CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
3878 // CHECK3-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
3879 // CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3880 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3881 // CHECK3-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3882 // CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
3883 // CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
3884 // CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3885 // CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
3886 // CHECK3-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
3887 // CHECK3-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3888 // CHECK3: cond.true:
3889 // CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
3890 // CHECK3-NEXT: br label [[COND_END:%.*]]
3891 // CHECK3: cond.false:
3892 // CHECK3-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3893 // CHECK3-NEXT: br label [[COND_END]]
3894 // CHECK3: cond.end:
3895 // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
3896 // CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
3897 // CHECK3-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
3898 // CHECK3-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
3899 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
3900 // CHECK3: omp.inner.for.cond:
3901 // CHECK3-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
3902 // CHECK3-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !38
3903 // CHECK3-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
3904 // CHECK3-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3905 // CHECK3: omp.inner.for.body:
3906 // CHECK3-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !38
3907 // CHECK3-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !38
3908 // CHECK3-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !38
3909 // CHECK3-NEXT: store i32 [[TMP21]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !38
3910 // CHECK3-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !38
3911 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**, i32)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]], i32 [[TMP22]]), !llvm.access.group !38
3912 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
3913 // CHECK3: omp.inner.for.inc:
3914 // CHECK3-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
3915 // CHECK3-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !38
3916 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
3917 // CHECK3-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
3918 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP39:![0-9]+]]
3919 // CHECK3: omp.inner.for.end:
3920 // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
3921 // CHECK3: omp.loop.exit:
3922 // CHECK3-NEXT: [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3923 // CHECK3-NEXT: [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
3924 // CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
3925 // CHECK3-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3926 // CHECK3-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
3927 // CHECK3-NEXT: br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3928 // CHECK3: .omp.final.then:
3929 // CHECK3-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3930 // CHECK3-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP29]], 0
3931 // CHECK3-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
3932 // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
3933 // CHECK3-NEXT: [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
3934 // CHECK3-NEXT: store i32 [[ADD9]], i32* [[I4]], align 4
3935 // CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]]
3936 // CHECK3: .omp.final.done:
3937 // CHECK3-NEXT: br label [[OMP_PRECOND_END]]
3938 // CHECK3: omp.precond.end:
3939 // CHECK3-NEXT: ret void
3940 //
3941 //
3942 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..15
3943 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
3944 // CHECK3-NEXT: entry:
3945 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
3946 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
3947 // CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
3948 // CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
3949 // CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 4
3950 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca double**, align 4
3951 // CHECK3-NEXT: [[B_ADDR:%.*]] = alloca double**, align 4
3952 // CHECK3-NEXT: [[C_ADDR:%.*]] = alloca double**, align 4
3953 // CHECK3-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
3954 // CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
3955 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
3956 // CHECK3-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
3957 // CHECK3-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
3958 // CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
3959 // CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
3960 // CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
3961 // CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3962 // CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3963 // CHECK3-NEXT: [[I4:%.*]] = alloca i32, align 4
3964 // CHECK3-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON_4:%.*]], align 4
3965 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
3966 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
3967 // CHECK3-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
3968 // CHECK3-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
3969 // CHECK3-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 4
3970 // CHECK3-NEXT: store double** [[A]], double*** [[A_ADDR]], align 4
3971 // CHECK3-NEXT: store double** [[B]], double*** [[B_ADDR]], align 4
3972 // CHECK3-NEXT: store double** [[C]], double*** [[C_ADDR]], align 4
3973 // CHECK3-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
3974 // CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
3975 // CHECK3-NEXT: [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
3976 // CHECK3-NEXT: [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
3977 // CHECK3-NEXT: [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
3978 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
3979 // CHECK3-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
3980 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3981 // CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
3982 // CHECK3-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
3983 // CHECK3-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
3984 // CHECK3-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
3985 // CHECK3-NEXT: store i32 0, i32* [[I]], align 4
3986 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3987 // CHECK3-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
3988 // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
3989 // CHECK3: omp.precond.then:
3990 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
3991 // CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
3992 // CHECK3-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
3993 // CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
3994 // CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
3995 // CHECK3-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
3996 // CHECK3-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
3997 // CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3998 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3999 // CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
4000 // CHECK3-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4001 // CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
4002 // CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP12]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP10]])
4003 // CHECK3-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
4004 // CHECK3: omp.dispatch.cond:
4005 // CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4006 // CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
4007 // CHECK3-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP13]], [[TMP14]]
4008 // CHECK3-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4009 // CHECK3: cond.true:
4010 // CHECK3-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
4011 // CHECK3-NEXT: br label [[COND_END:%.*]]
4012 // CHECK3: cond.false:
4013 // CHECK3-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4014 // CHECK3-NEXT: br label [[COND_END]]
4015 // CHECK3: cond.end:
4016 // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ [[TMP15]], [[COND_TRUE]] ], [ [[TMP16]], [[COND_FALSE]] ]
4017 // CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
4018 // CHECK3-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
4019 // CHECK3-NEXT: store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
4020 // CHECK3-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
4021 // CHECK3-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4022 // CHECK3-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
4023 // CHECK3-NEXT: br i1 [[CMP6]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
4024 // CHECK3: omp.dispatch.body:
4025 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
4026 // CHECK3: omp.inner.for.cond:
4027 // CHECK3-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
4028 // CHECK3-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !41
4029 // CHECK3-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
4030 // CHECK3-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4031 // CHECK3: omp.inner.for.body:
4032 // CHECK3-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
4033 // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP22]], 1
4034 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
4035 // CHECK3-NEXT: store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !41
4036 // CHECK3-NEXT: [[TMP23:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !41
4037 // CHECK3-NEXT: [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !41
4038 // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
4039 // CHECK3-NEXT: [[TMP25:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !41
4040 // CHECK3-NEXT: [[TMP26:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !41
4041 // CHECK3-NEXT: [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !41
4042 // CHECK3-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
4043 // CHECK3-NEXT: [[TMP28:%.*]] = load double, double* [[ARRAYIDX8]], align 4, !llvm.access.group !41
4044 // CHECK3-NEXT: [[ADD9:%.*]] = fadd double [[TMP25]], [[TMP28]]
4045 // CHECK3-NEXT: [[TMP29:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !41
4046 // CHECK3-NEXT: [[TMP30:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !41
4047 // CHECK3-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds double, double* [[TMP29]], i32 [[TMP30]]
4048 // CHECK3-NEXT: store double [[ADD9]], double* [[ARRAYIDX10]], align 4, !llvm.access.group !41
4049 // CHECK3-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_4]], %class.anon.4* [[REF_TMP]], i32 0, i32 0
4050 // CHECK3-NEXT: store double** [[TMP1]], double*** [[TMP31]], align 4, !llvm.access.group !41
4051 // CHECK3-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[CLASS_ANON_4]], %class.anon.4* [[REF_TMP]], i32 0, i32 1
4052 // CHECK3-NEXT: store i32* [[I4]], i32** [[TMP32]], align 4, !llvm.access.group !41
4053 // CHECK3-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[CLASS_ANON_4]], %class.anon.4* [[REF_TMP]], i32 0, i32 2
4054 // CHECK3-NEXT: store double** [[TMP2]], double*** [[TMP33]], align 4, !llvm.access.group !41
4055 // CHECK3-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[CLASS_ANON_4]], %class.anon.4* [[REF_TMP]], i32 0, i32 3
4056 // CHECK3-NEXT: store double** [[TMP3]], double*** [[TMP34]], align 4, !llvm.access.group !41
4057 // CHECK3-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE3_clEv"(%class.anon.4* noundef nonnull align 4 dereferenceable(16) [[REF_TMP]]), !llvm.access.group !41
4058 // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
4059 // CHECK3: omp.body.continue:
4060 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
4061 // CHECK3: omp.inner.for.inc:
4062 // CHECK3-NEXT: [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
4063 // CHECK3-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP35]], 1
4064 // CHECK3-NEXT: store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
4065 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP42:![0-9]+]]
4066 // CHECK3: omp.inner.for.end:
4067 // CHECK3-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
4068 // CHECK3: omp.dispatch.inc:
4069 // CHECK3-NEXT: [[TMP36:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
4070 // CHECK3-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
4071 // CHECK3-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP36]], [[TMP37]]
4072 // CHECK3-NEXT: store i32 [[ADD12]], i32* [[DOTOMP_LB]], align 4
4073 // CHECK3-NEXT: [[TMP38:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4074 // CHECK3-NEXT: [[TMP39:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
4075 // CHECK3-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP38]], [[TMP39]]
4076 // CHECK3-NEXT: store i32 [[ADD13]], i32* [[DOTOMP_UB]], align 4
4077 // CHECK3-NEXT: br label [[OMP_DISPATCH_COND]]
4078 // CHECK3: omp.dispatch.end:
4079 // CHECK3-NEXT: [[TMP40:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4080 // CHECK3-NEXT: [[TMP41:%.*]] = load i32, i32* [[TMP40]], align 4
4081 // CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP41]])
4082 // CHECK3-NEXT: [[TMP42:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4083 // CHECK3-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0
4084 // CHECK3-NEXT: br i1 [[TMP43]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4085 // CHECK3: .omp.final.then:
4086 // CHECK3-NEXT: [[TMP44:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4087 // CHECK3-NEXT: [[SUB14:%.*]] = sub nsw i32 [[TMP44]], 0
4088 // CHECK3-NEXT: [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
4089 // CHECK3-NEXT: [[MUL16:%.*]] = mul nsw i32 [[DIV15]], 1
4090 // CHECK3-NEXT: [[ADD17:%.*]] = add nsw i32 0, [[MUL16]]
4091 // CHECK3-NEXT: store i32 [[ADD17]], i32* [[I4]], align 4
4092 // CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]]
4093 // CHECK3: .omp.final.done:
4094 // CHECK3-NEXT: br label [[OMP_PRECOND_END]]
4095 // CHECK3: omp.precond.end:
4096 // CHECK3-NEXT: ret void
4097 //
4098 //
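// The final group shown here (main_l299) switches the inner worksharing loop
// to a dynamic schedule. A minimal sketch of the corresponding construct (an
// assumption; the actual source appears earlier in this file):
//
//   // hypothetical shape, not the literal test source
//   #pragma omp target teams distribute parallel for schedule(dynamic)
//   for (int i = 0; i < n; ++i)
//     a[i] = b[i] + c[i];
//
// @.omp_outlined..18 again handles the distribute loop (schedule id 92) and
// forks @.omp_outlined..19, which initializes the loop with
// __kmpc_dispatch_init_4 (schedule id 35) over the bounds it inherited from
// the distribute level and pulls chunks with __kmpc_dispatch_next_4 instead
// of using the static init/fini pair.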
4099 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l299
4100 // CHECK3-SAME: (i32 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR2]] {
4101 // CHECK3-NEXT: entry:
4102 // CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
4103 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca double*, align 4
4104 // CHECK3-NEXT: [[B_ADDR:%.*]] = alloca double*, align 4
4105 // CHECK3-NEXT: [[C_ADDR:%.*]] = alloca double*, align 4
4106 // CHECK3-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
4107 // CHECK3-NEXT: store double* [[A]], double** [[A_ADDR]], align 4
4108 // CHECK3-NEXT: store double* [[B]], double** [[B_ADDR]], align 4
4109 // CHECK3-NEXT: store double* [[C]], double** [[C_ADDR]], align 4
4110 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
4111 // CHECK3-NEXT: ret void
4112 //
4113 //
4114 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..18
4115 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
4116 // CHECK3-NEXT: entry:
4117 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
4118 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
4119 // CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 4
4120 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca double**, align 4
4121 // CHECK3-NEXT: [[B_ADDR:%.*]] = alloca double**, align 4
4122 // CHECK3-NEXT: [[C_ADDR:%.*]] = alloca double**, align 4
4123 // CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
4124 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
4125 // CHECK3-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
4126 // CHECK3-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
4127 // CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
4128 // CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
4129 // CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
4130 // CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4131 // CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4132 // CHECK3-NEXT: [[I3:%.*]] = alloca i32, align 4
4133 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
4134 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
4135 // CHECK3-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 4
4136 // CHECK3-NEXT: store double** [[A]], double*** [[A_ADDR]], align 4
4137 // CHECK3-NEXT: store double** [[B]], double*** [[B_ADDR]], align 4
4138 // CHECK3-NEXT: store double** [[C]], double*** [[C_ADDR]], align 4
4139 // CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
4140 // CHECK3-NEXT: [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
4141 // CHECK3-NEXT: [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
4142 // CHECK3-NEXT: [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
4143 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
4144 // CHECK3-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
4145 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4146 // CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
4147 // CHECK3-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
4148 // CHECK3-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
4149 // CHECK3-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
4150 // CHECK3-NEXT: store i32 0, i32* [[I]], align 4
4151 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4152 // CHECK3-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
4153 // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
4154 // CHECK3: omp.precond.then:
4155 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
4156 // CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4157 // CHECK3-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
4158 // CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
4159 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
4160 // CHECK3-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4161 // CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
4162 // CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
4163 // CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
4164 // CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4165 // CHECK3-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
4166 // CHECK3-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4167 // CHECK3: cond.true:
4168 // CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4169 // CHECK3-NEXT: br label [[COND_END:%.*]]
4170 // CHECK3: cond.false:
4171 // CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
4172 // CHECK3-NEXT: br label [[COND_END]]
4173 // CHECK3: cond.end:
4174 // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
4175 // CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
4176 // CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
4177 // CHECK3-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
4178 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
4179 // CHECK3: omp.inner.for.cond:
4180 // CHECK3-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
4181 // CHECK3-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !44
4182 // CHECK3-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
4183 // CHECK3-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4184 // CHECK3: omp.inner.for.body:
4185 // CHECK3-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !44
4186 // CHECK3-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !44
4187 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !44
4188 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
4189 // CHECK3: omp.inner.for.inc:
4190 // CHECK3-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
4191 // CHECK3-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !44
4192 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
4193 // CHECK3-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
4194 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP45:![0-9]+]]
4195 // CHECK3: omp.inner.for.end:
4196 // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
4197 // CHECK3: omp.loop.exit:
4198 // CHECK3-NEXT: [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4199 // CHECK3-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
4200 // CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
4201 // CHECK3-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4202 // CHECK3-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
4203 // CHECK3-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4204 // CHECK3: .omp.final.then:
4205 // CHECK3-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4206 // CHECK3-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
4207 // CHECK3-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
4208 // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
4209 // CHECK3-NEXT: [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
4210 // CHECK3-NEXT: store i32 [[ADD8]], i32* [[I3]], align 4
4211 // CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]]
4212 // CHECK3: .omp.final.done:
4213 // CHECK3-NEXT: br label [[OMP_PRECOND_END]]
4214 // CHECK3: omp.precond.end:
4215 // CHECK3-NEXT: ret void
4216 //
4217 //
4218 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..19
4219 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
4220 // CHECK3-NEXT: entry:
4221 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
4222 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
4223 // CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
4224 // CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
4225 // CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 4
4226 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca double**, align 4
4227 // CHECK3-NEXT: [[B_ADDR:%.*]] = alloca double**, align 4
4228 // CHECK3-NEXT: [[C_ADDR:%.*]] = alloca double**, align 4
4229 // CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
4230 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
4231 // CHECK3-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
4232 // CHECK3-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
4233 // CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
4234 // CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
4235 // CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
4236 // CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4237 // CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4238 // CHECK3-NEXT: [[I3:%.*]] = alloca i32, align 4
4239 // CHECK3-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON_5:%.*]], align 4
4240 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
4241 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
4242 // CHECK3-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
4243 // CHECK3-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
4244 // CHECK3-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 4
4245 // CHECK3-NEXT: store double** [[A]], double*** [[A_ADDR]], align 4
4246 // CHECK3-NEXT: store double** [[B]], double*** [[B_ADDR]], align 4
4247 // CHECK3-NEXT: store double** [[C]], double*** [[C_ADDR]], align 4
4248 // CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
4249 // CHECK3-NEXT: [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
4250 // CHECK3-NEXT: [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
4251 // CHECK3-NEXT: [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
4252 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
4253 // CHECK3-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
4254 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4255 // CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
4256 // CHECK3-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
4257 // CHECK3-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
4258 // CHECK3-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
4259 // CHECK3-NEXT: store i32 0, i32* [[I]], align 4
4260 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4261 // CHECK3-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
4262 // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
4263 // CHECK3: omp.precond.then:
4264 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
4265 // CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4266 // CHECK3-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
4267 // CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
4268 // CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
4269 // CHECK3-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
4270 // CHECK3-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
4271 // CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
4272 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
4273 // CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
4274 // CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4275 // CHECK3-NEXT: [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4276 // CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
4277 // CHECK3-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32 35, i32 [[TMP10]], i32 [[TMP11]], i32 1, i32 1)
4278 // CHECK3-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
4279 // CHECK3: omp.dispatch.cond:
4280 // CHECK3-NEXT: [[TMP14:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4281 // CHECK3-NEXT: [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4
4282 // CHECK3-NEXT: [[TMP16:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP15]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
4283 // CHECK3-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP16]], 0
4284 // CHECK3-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
4285 // CHECK3: omp.dispatch.body:
4286 // CHECK3-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
4287 // CHECK3-NEXT: store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
4288 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
4289 // CHECK3: omp.inner.for.cond:
4290 // CHECK3-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
4291 // CHECK3-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !47
4292 // CHECK3-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
4293 // CHECK3-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4294 // CHECK3: omp.inner.for.body:
4295 // CHECK3-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
4296 // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP20]], 1
4297 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
4298 // CHECK3-NEXT: store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !47
4299 // CHECK3-NEXT: [[TMP21:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !47
4300 // CHECK3-NEXT: [[TMP22:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !47
4301 // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP21]], i32 [[TMP22]]
4302 // CHECK3-NEXT: [[TMP23:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !47
4303 // CHECK3-NEXT: [[TMP24:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !47
4304 // CHECK3-NEXT: [[TMP25:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !47
4305 // CHECK3-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds double, double* [[TMP24]], i32 [[TMP25]]
4306 // CHECK3-NEXT: [[TMP26:%.*]] = load double, double* [[ARRAYIDX5]], align 4, !llvm.access.group !47
4307 // CHECK3-NEXT: [[ADD6:%.*]] = fadd double [[TMP23]], [[TMP26]]
4308 // CHECK3-NEXT: [[TMP27:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !47
4309 // CHECK3-NEXT: [[TMP28:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !47
4310 // CHECK3-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds double, double* [[TMP27]], i32 [[TMP28]]
4311 // CHECK3-NEXT: store double [[ADD6]], double* [[ARRAYIDX7]], align 4, !llvm.access.group !47
4312 // CHECK3-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_5]], %class.anon.5* [[REF_TMP]], i32 0, i32 0
4313 // CHECK3-NEXT: store double** [[TMP1]], double*** [[TMP29]], align 4, !llvm.access.group !47
4314 // CHECK3-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_5]], %class.anon.5* [[REF_TMP]], i32 0, i32 1
4315 // CHECK3-NEXT: store i32* [[I3]], i32** [[TMP30]], align 4, !llvm.access.group !47
4316 // CHECK3-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_5]], %class.anon.5* [[REF_TMP]], i32 0, i32 2
4317 // CHECK3-NEXT: store double** [[TMP2]], double*** [[TMP31]], align 4, !llvm.access.group !47
4318 // CHECK3-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[CLASS_ANON_5]], %class.anon.5* [[REF_TMP]], i32 0, i32 3
4319 // CHECK3-NEXT: store double** [[TMP3]], double*** [[TMP32]], align 4, !llvm.access.group !47
4320 // CHECK3-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE4_clEv"(%class.anon.5* noundef nonnull align 4 dereferenceable(16) [[REF_TMP]]), !llvm.access.group !47
4321 // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
4322 // CHECK3: omp.body.continue:
4323 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
4324 // CHECK3: omp.inner.for.inc:
4325 // CHECK3-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
4326 // CHECK3-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP33]], 1
4327 // CHECK3-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
4328 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP48:![0-9]+]]
4329 // CHECK3: omp.inner.for.end:
4330 // CHECK3-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
4331 // CHECK3: omp.dispatch.inc:
4332 // CHECK3-NEXT: br label [[OMP_DISPATCH_COND]]
4333 // CHECK3: omp.dispatch.end:
4334 // CHECK3-NEXT: [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4335 // CHECK3-NEXT: [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
4336 // CHECK3-NEXT: br i1 [[TMP35]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4337 // CHECK3: .omp.final.then:
4338 // CHECK3-NEXT: [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4339 // CHECK3-NEXT: [[SUB9:%.*]] = sub nsw i32 [[TMP36]], 0
4340 // CHECK3-NEXT: [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
4341 // CHECK3-NEXT: [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
4342 // CHECK3-NEXT: [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
4343 // CHECK3-NEXT: store i32 [[ADD12]], i32* [[I3]], align 4
4344 // CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]]
4345 // CHECK3: .omp.final.done:
4346 // CHECK3-NEXT: br label [[OMP_PRECOND_END]]
4347 // CHECK3: omp.precond.end:
4348 // CHECK3-NEXT: ret void
4349 //
4350 //
4351 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l328
4352 // CHECK3-SAME: (i32 noundef [[CH:%.*]], i32 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR2]] {
4353 // CHECK3-NEXT: entry:
4354 // CHECK3-NEXT: [[CH_ADDR:%.*]] = alloca i32, align 4
4355 // CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
4356 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca double*, align 4
4357 // CHECK3-NEXT: [[B_ADDR:%.*]] = alloca double*, align 4
4358 // CHECK3-NEXT: [[C_ADDR:%.*]] = alloca double*, align 4
4359 // CHECK3-NEXT: store i32 [[CH]], i32* [[CH_ADDR]], align 4
4360 // CHECK3-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
4361 // CHECK3-NEXT: store double* [[A]], double** [[A_ADDR]], align 4
4362 // CHECK3-NEXT: store double* [[B]], double** [[B_ADDR]], align 4
4363 // CHECK3-NEXT: store double* [[C]], double** [[C_ADDR]], align 4
4364 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
4365 // CHECK3-NEXT: ret void
4366 //
4367 //
4368 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..22
4369 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
4370 // CHECK3-NEXT: entry:
4371 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
4372 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
4373 // CHECK3-NEXT: [[CH_ADDR:%.*]] = alloca i32*, align 4
4374 // CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 4
4375 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca double**, align 4
4376 // CHECK3-NEXT: [[B_ADDR:%.*]] = alloca double**, align 4
4377 // CHECK3-NEXT: [[C_ADDR:%.*]] = alloca double**, align 4
4378 // CHECK3-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
4379 // CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
4380 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
4381 // CHECK3-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
4382 // CHECK3-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
4383 // CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
4384 // CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
4385 // CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
4386 // CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4387 // CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4388 // CHECK3-NEXT: [[I4:%.*]] = alloca i32, align 4
4389 // CHECK3-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
4390 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
4391 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
4392 // CHECK3-NEXT: store i32* [[CH]], i32** [[CH_ADDR]], align 4
4393 // CHECK3-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 4
4394 // CHECK3-NEXT: store double** [[A]], double*** [[A_ADDR]], align 4
4395 // CHECK3-NEXT: store double** [[B]], double*** [[B_ADDR]], align 4
4396 // CHECK3-NEXT: store double** [[C]], double*** [[C_ADDR]], align 4
4397 // CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
4398 // CHECK3-NEXT: [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
4399 // CHECK3-NEXT: [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 4
4400 // CHECK3-NEXT: [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 4
4401 // CHECK3-NEXT: [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 4
4402 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
4403 // CHECK3-NEXT: store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
4404 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
4405 // CHECK3-NEXT: store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
4406 // CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4407 // CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
4408 // CHECK3-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
4409 // CHECK3-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
4410 // CHECK3-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
4411 // CHECK3-NEXT: store i32 0, i32* [[I]], align 4
4412 // CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4413 // CHECK3-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
4414 // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
4415 // CHECK3: omp.precond.then:
4416 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
4417 // CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
4418 // CHECK3-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
4419 // CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
4420 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
4421 // CHECK3-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4422 // CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
4423 // CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
4424 // CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
4425 // CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
4426 // CHECK3-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
4427 // CHECK3-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4428 // CHECK3: cond.true:
4429 // CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
4430 // CHECK3-NEXT: br label [[COND_END:%.*]]
4431 // CHECK3: cond.false:
4432 // CHECK3-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
4433 // CHECK3-NEXT: br label [[COND_END]]
4434 // CHECK3: cond.end:
4435 // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
4436 // CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
4437 // CHECK3-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
4438 // CHECK3-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
4439 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
4440 // CHECK3: omp.inner.for.cond:
4441 // CHECK3-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
4442 // CHECK3-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !50
4443 // CHECK3-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
4444 // CHECK3-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4445 // CHECK3: omp.inner.for.body:
4446 // CHECK3-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !50
4447 // CHECK3-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !50
4448 // CHECK3-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !50
4449 // CHECK3-NEXT: store i32 [[TMP21]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !50
4450 // CHECK3-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !50
4451 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**, i32)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]], i32 [[TMP22]]), !llvm.access.group !50
4452 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
4453 // CHECK3: omp.inner.for.inc:
4454 // CHECK3-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
4455 // CHECK3-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !50
4456 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
4457 // CHECK3-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
4458 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP51:![0-9]+]]
4459 // CHECK3: omp.inner.for.end:
4460 // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
4461 // CHECK3: omp.loop.exit:
4462 // CHECK3-NEXT: [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4463 // CHECK3-NEXT: [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
4464 // CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
4465 // CHECK3-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4466 // CHECK3-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
4467 // CHECK3-NEXT: br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4468 // CHECK3: .omp.final.then:
4469 // CHECK3-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4470 // CHECK3-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP29]], 0
4471 // CHECK3-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
4472 // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
4473 // CHECK3-NEXT: [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
4474 // CHECK3-NEXT: store i32 [[ADD9]], i32* [[I4]], align 4
4475 // CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]]
4476 // CHECK3: .omp.final.done:
4477 // CHECK3-NEXT: br label [[OMP_PRECOND_END]]
4478 // CHECK3: omp.precond.end:
4479 // CHECK3-NEXT: ret void
4480 //
4481 //
4482 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..23
4483 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
4484 // CHECK3-NEXT: entry:
4485 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
4486 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
4487 // CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
4488 // CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
4489 // CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 4
4490 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca double**, align 4
4491 // CHECK3-NEXT: [[B_ADDR:%.*]] = alloca double**, align 4
4492 // CHECK3-NEXT: [[C_ADDR:%.*]] = alloca double**, align 4
4493 // CHECK3-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
4494 // CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
4495 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
4496 // CHECK3-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
4497 // CHECK3-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
4498 // CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
4499 // CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
4500 // CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
4501 // CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4502 // CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4503 // CHECK3-NEXT: [[I4:%.*]] = alloca i32, align 4
4504 // CHECK3-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON_6:%.*]], align 4
4505 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
4506 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
4507 // CHECK3-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
4508 // CHECK3-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
4509 // CHECK3-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 4
4510 // CHECK3-NEXT: store double** [[A]], double*** [[A_ADDR]], align 4
4511 // CHECK3-NEXT: store double** [[B]], double*** [[B_ADDR]], align 4
4512 // CHECK3-NEXT: store double** [[C]], double*** [[C_ADDR]], align 4
4513 // CHECK3-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
4514 // CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
4515 // CHECK3-NEXT: [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
4516 // CHECK3-NEXT: [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
4517 // CHECK3-NEXT: [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
4518 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
4519 // CHECK3-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
4520 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4521 // CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
4522 // CHECK3-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
4523 // CHECK3-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
4524 // CHECK3-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
4525 // CHECK3-NEXT: store i32 0, i32* [[I]], align 4
4526 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4527 // CHECK3-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
4528 // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
4529 // CHECK3: omp.precond.then:
4530 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
4531 // CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
4532 // CHECK3-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
4533 // CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
4534 // CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
4535 // CHECK3-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
4536 // CHECK3-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
4537 // CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
4538 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
4539 // CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
4540 // CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
4541 // CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4542 // CHECK3-NEXT: [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4543 // CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
4544 // CHECK3-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32 35, i32 [[TMP11]], i32 [[TMP12]], i32 1, i32 [[TMP10]])
4545 // CHECK3-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
4546 // CHECK3: omp.dispatch.cond:
4547 // CHECK3-NEXT: [[TMP15:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4548 // CHECK3-NEXT: [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4
4549 // CHECK3-NEXT: [[TMP17:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP16]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
4550 // CHECK3-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP17]], 0
4551 // CHECK3-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
4552 // CHECK3: omp.dispatch.body:
4553 // CHECK3-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
4554 // CHECK3-NEXT: store i32 [[TMP18]], i32* [[DOTOMP_IV]], align 4
4555 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
4556 // CHECK3: omp.inner.for.cond:
4557 // CHECK3-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
4558 // CHECK3-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !53
4559 // CHECK3-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
4560 // CHECK3-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4561 // CHECK3: omp.inner.for.body:
4562 // CHECK3-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
4563 // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP21]], 1
4564 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
4565 // CHECK3-NEXT: store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !53
4566 // CHECK3-NEXT: [[TMP22:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !53
4567 // CHECK3-NEXT: [[TMP23:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !53
4568 // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP22]], i32 [[TMP23]]
4569 // CHECK3-NEXT: [[TMP24:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !53
4570 // CHECK3-NEXT: [[TMP25:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !53
4571 // CHECK3-NEXT: [[TMP26:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !53
4572 // CHECK3-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP25]], i32 [[TMP26]]
4573 // CHECK3-NEXT: [[TMP27:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !53
4574 // CHECK3-NEXT: [[ADD7:%.*]] = fadd double [[TMP24]], [[TMP27]]
4575 // CHECK3-NEXT: [[TMP28:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !53
4576 // CHECK3-NEXT: [[TMP29:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !53
4577 // CHECK3-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP28]], i32 [[TMP29]]
4578 // CHECK3-NEXT: store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !53
4579 // CHECK3-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_6]], %class.anon.6* [[REF_TMP]], i32 0, i32 0
4580 // CHECK3-NEXT: store double** [[TMP1]], double*** [[TMP30]], align 4, !llvm.access.group !53
4581 // CHECK3-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_6]], %class.anon.6* [[REF_TMP]], i32 0, i32 1
4582 // CHECK3-NEXT: store i32* [[I4]], i32** [[TMP31]], align 4, !llvm.access.group !53
4583 // CHECK3-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[CLASS_ANON_6]], %class.anon.6* [[REF_TMP]], i32 0, i32 2
4584 // CHECK3-NEXT: store double** [[TMP2]], double*** [[TMP32]], align 4, !llvm.access.group !53
4585 // CHECK3-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[CLASS_ANON_6]], %class.anon.6* [[REF_TMP]], i32 0, i32 3
4586 // CHECK3-NEXT: store double** [[TMP3]], double*** [[TMP33]], align 4, !llvm.access.group !53
4587 // CHECK3-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE5_clEv"(%class.anon.6* noundef nonnull align 4 dereferenceable(16) [[REF_TMP]]), !llvm.access.group !53
4588 // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
4589 // CHECK3: omp.body.continue:
4590 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
4591 // CHECK3: omp.inner.for.inc:
4592 // CHECK3-NEXT: [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
4593 // CHECK3-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP34]], 1
4594 // CHECK3-NEXT: store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
4595 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP54:![0-9]+]]
4596 // CHECK3: omp.inner.for.end:
4597 // CHECK3-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
4598 // CHECK3: omp.dispatch.inc:
4599 // CHECK3-NEXT: br label [[OMP_DISPATCH_COND]]
4600 // CHECK3: omp.dispatch.end:
4601 // CHECK3-NEXT: [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4602 // CHECK3-NEXT: [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
4603 // CHECK3-NEXT: br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4604 // CHECK3: .omp.final.then:
4605 // CHECK3-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4606 // CHECK3-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP37]], 0
4607 // CHECK3-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
4608 // CHECK3-NEXT: [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
4609 // CHECK3-NEXT: [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
4610 // CHECK3-NEXT: store i32 [[ADD13]], i32* [[I4]], align 4
4611 // CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]]
4612 // CHECK3: .omp.final.done:
4613 // CHECK3-NEXT: br label [[OMP_PRECOND_END]]
4614 // CHECK3: omp.precond.end:
4615 // CHECK3-NEXT: ret void
4616 //
4617 //
4618 // CHECK3-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
4619 // CHECK3-SAME: () #[[ATTR4:[0-9]+]] {
4620 // CHECK3-NEXT: entry:
4621 // CHECK3-NEXT: call void @__tgt_register_requires(i64 1)
4622 // CHECK3-NEXT: ret void
4623 //
4624 //
4625 // CHECK5-LABEL: define {{[^@]+}}@main
4626 // CHECK5-SAME: () #[[ATTR0:[0-9]+]] {
4627 // CHECK5-NEXT: entry:
4628 // CHECK5-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
4629 // CHECK5-NEXT: [[A:%.*]] = alloca double*, align 8
4630 // CHECK5-NEXT: [[B:%.*]] = alloca double*, align 8
4631 // CHECK5-NEXT: [[C:%.*]] = alloca double*, align 8
4632 // CHECK5-NEXT: [[N:%.*]] = alloca i32, align 4
4633 // CHECK5-NEXT: [[CH:%.*]] = alloca i32, align 4
4634 // CHECK5-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 8
4635 // CHECK5-NEXT: store i32 0, i32* [[RETVAL]], align 4
4636 // CHECK5-NEXT: store i32 10000, i32* [[N]], align 4
4637 // CHECK5-NEXT: store i32 100, i32* [[CH]], align 4
4638 // CHECK5-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 0
4639 // CHECK5-NEXT: store i32* [[N]], i32** [[TMP0]], align 8
4640 // CHECK5-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 1
4641 // CHECK5-NEXT: store double** [[A]], double*** [[TMP1]], align 8
4642 // CHECK5-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 2
4643 // CHECK5-NEXT: store double** [[B]], double*** [[TMP2]], align 8
4644 // CHECK5-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 3
4645 // CHECK5-NEXT: store double** [[C]], double*** [[TMP3]], align 8
4646 // CHECK5-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 4
4647 // CHECK5-NEXT: store i32* [[CH]], i32** [[TMP4]], align 8
4648 // CHECK5-NEXT: call void @"_ZZ4mainENK3$_0clEv"(%class.anon* noundef nonnull align 8 dereferenceable(40) [[REF_TMP]])
4649 // CHECK5-NEXT: ret i32 0
4650 //
4651 //
4652 // CHECK7-LABEL: define {{[^@]+}}@main
4653 // CHECK7-SAME: () #[[ATTR0:[0-9]+]] {
4654 // CHECK7-NEXT: entry:
4655 // CHECK7-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
4656 // CHECK7-NEXT: [[A:%.*]] = alloca double*, align 4
4657 // CHECK7-NEXT: [[B:%.*]] = alloca double*, align 4
4658 // CHECK7-NEXT: [[C:%.*]] = alloca double*, align 4
4659 // CHECK7-NEXT: [[N:%.*]] = alloca i32, align 4
4660 // CHECK7-NEXT: [[CH:%.*]] = alloca i32, align 4
4661 // CHECK7-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 4
4662 // CHECK7-NEXT: store i32 0, i32* [[RETVAL]], align 4
4663 // CHECK7-NEXT: store i32 10000, i32* [[N]], align 4
4664 // CHECK7-NEXT: store i32 100, i32* [[CH]], align 4
4665 // CHECK7-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 0
4666 // CHECK7-NEXT: store i32* [[N]], i32** [[TMP0]], align 4
4667 // CHECK7-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 1
4668 // CHECK7-NEXT: store double** [[A]], double*** [[TMP1]], align 4
4669 // CHECK7-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 2
4670 // CHECK7-NEXT: store double** [[B]], double*** [[TMP2]], align 4
4671 // CHECK7-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 3
4672 // CHECK7-NEXT: store double** [[C]], double*** [[TMP3]], align 4
4673 // CHECK7-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 4
4674 // CHECK7-NEXT: store i32* [[CH]], i32** [[TMP4]], align 4
4675 // CHECK7-NEXT: call void @"_ZZ4mainENK3$_0clEv"(%class.anon* noundef nonnull align 4 dereferenceable(20) [[REF_TMP]])
4676 // CHECK7-NEXT: ret i32 0
4677 //
4678 //
4679 // CHECK9-LABEL: define {{[^@]+}}@main
4680 // CHECK9-SAME: () #[[ATTR0:[0-9]+]] {
4681 // CHECK9-NEXT: entry:
4682 // CHECK9-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
4683 // CHECK9-NEXT: [[A:%.*]] = alloca double*, align 8
4684 // CHECK9-NEXT: [[B:%.*]] = alloca double*, align 8
4685 // CHECK9-NEXT: [[C:%.*]] = alloca double*, align 8
4686 // CHECK9-NEXT: [[N:%.*]] = alloca i32, align 4
4687 // CHECK9-NEXT: [[CH:%.*]] = alloca i32, align 4
4688 // CHECK9-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
4689 // CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 8
4690 // CHECK9-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 8
4691 // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 8
4692 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
4693 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
4694 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
4695 // CHECK9-NEXT: [[N_CASTED3:%.*]] = alloca i64, align 8
4696 // CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [4 x i8*], align 8
4697 // CHECK9-NEXT: [[DOTOFFLOAD_PTRS6:%.*]] = alloca [4 x i8*], align 8
4698 // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [4 x i8*], align 8
4699 // CHECK9-NEXT: [[_TMP8:%.*]] = alloca i32, align 4
4700 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4
4701 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4
4702 // CHECK9-NEXT: [[CH_CASTED:%.*]] = alloca i64, align 8
4703 // CHECK9-NEXT: [[N_CASTED19:%.*]] = alloca i64, align 8
4704 // CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS21:%.*]] = alloca [5 x i8*], align 8
4705 // CHECK9-NEXT: [[DOTOFFLOAD_PTRS22:%.*]] = alloca [5 x i8*], align 8
4706 // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS23:%.*]] = alloca [5 x i8*], align 8
4707 // CHECK9-NEXT: [[_TMP24:%.*]] = alloca i32, align 4
4708 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4
4709 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_26:%.*]] = alloca i32, align 4
4710 // CHECK9-NEXT: [[N_CASTED34:%.*]] = alloca i64, align 8
4711 // CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS36:%.*]] = alloca [4 x i8*], align 8
4712 // CHECK9-NEXT: [[DOTOFFLOAD_PTRS37:%.*]] = alloca [4 x i8*], align 8
4713 // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS38:%.*]] = alloca [4 x i8*], align 8
4714 // CHECK9-NEXT: [[_TMP39:%.*]] = alloca i32, align 4
4715 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_40:%.*]] = alloca i32, align 4
4716 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_41:%.*]] = alloca i32, align 4
4717 // CHECK9-NEXT: [[CH_CASTED49:%.*]] = alloca i64, align 8
4718 // CHECK9-NEXT: [[N_CASTED51:%.*]] = alloca i64, align 8
4719 // CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS53:%.*]] = alloca [5 x i8*], align 8
4720 // CHECK9-NEXT: [[DOTOFFLOAD_PTRS54:%.*]] = alloca [5 x i8*], align 8
4721 // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS55:%.*]] = alloca [5 x i8*], align 8
4722 // CHECK9-NEXT: [[_TMP56:%.*]] = alloca i32, align 4
4723 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_57:%.*]] = alloca i32, align 4
4724 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_58:%.*]] = alloca i32, align 4
4725 // CHECK9-NEXT: [[N_CASTED66:%.*]] = alloca i64, align 8
4726 // CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS68:%.*]] = alloca [4 x i8*], align 8
4727 // CHECK9-NEXT: [[DOTOFFLOAD_PTRS69:%.*]] = alloca [4 x i8*], align 8
4728 // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS70:%.*]] = alloca [4 x i8*], align 8
4729 // CHECK9-NEXT: [[_TMP71:%.*]] = alloca i32, align 4
4730 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_72:%.*]] = alloca i32, align 4
4731 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_73:%.*]] = alloca i32, align 4
4732 // CHECK9-NEXT: [[CH_CASTED81:%.*]] = alloca i64, align 8
4733 // CHECK9-NEXT: [[N_CASTED83:%.*]] = alloca i64, align 8
4734 // CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS85:%.*]] = alloca [5 x i8*], align 8
4735 // CHECK9-NEXT: [[DOTOFFLOAD_PTRS86:%.*]] = alloca [5 x i8*], align 8
4736 // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS87:%.*]] = alloca [5 x i8*], align 8
4737 // CHECK9-NEXT: [[_TMP88:%.*]] = alloca i32, align 4
4738 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_89:%.*]] = alloca i32, align 4
4739 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_90:%.*]] = alloca i32, align 4
4740 // CHECK9-NEXT: store i32 0, i32* [[RETVAL]], align 4
4741 // CHECK9-NEXT: store i32 10000, i32* [[N]], align 4
4742 // CHECK9-NEXT: store i32 100, i32* [[CH]], align 4
4743 // CHECK9-NEXT: [[TMP0:%.*]] = load i32, i32* [[N]], align 4
4744 // CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[N_CASTED]] to i32*
4745 // CHECK9-NEXT: store i32 [[TMP0]], i32* [[CONV]], align 4
4746 // CHECK9-NEXT: [[TMP1:%.*]] = load i64, i64* [[N_CASTED]], align 8
4747 // CHECK9-NEXT: [[TMP2:%.*]] = load double*, double** [[A]], align 8
4748 // CHECK9-NEXT: [[TMP3:%.*]] = load double*, double** [[B]], align 8
4749 // CHECK9-NEXT: [[TMP4:%.*]] = load double*, double** [[C]], align 8
4750 // CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
4751 // CHECK9-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i64*
4752 // CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP6]], align 8
4753 // CHECK9-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
4754 // CHECK9-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64*
4755 // CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP8]], align 8
4756 // CHECK9-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
4757 // CHECK9-NEXT: store i8* null, i8** [[TMP9]], align 8
4758 // CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
4759 // CHECK9-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to double**
4760 // CHECK9-NEXT: store double* [[TMP2]], double** [[TMP11]], align 8
4761 // CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
4762 // CHECK9-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double**
4763 // CHECK9-NEXT: store double* [[TMP2]], double** [[TMP13]], align 8
4764 // CHECK9-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
4765 // CHECK9-NEXT: store i8* null, i8** [[TMP14]], align 8
4766 // CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
4767 // CHECK9-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to double**
4768 // CHECK9-NEXT: store double* [[TMP3]], double** [[TMP16]], align 8
4769 // CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
4770 // CHECK9-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to double**
4771 // CHECK9-NEXT: store double* [[TMP3]], double** [[TMP18]], align 8
4772 // CHECK9-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
4773 // CHECK9-NEXT: store i8* null, i8** [[TMP19]], align 8
4774 // CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
4775 // CHECK9-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to double**
4776 // CHECK9-NEXT: store double* [[TMP4]], double** [[TMP21]], align 8
4777 // CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
4778 // CHECK9-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to double**
4779 // CHECK9-NEXT: store double* [[TMP4]], double** [[TMP23]], align 8
4780 // CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
4781 // CHECK9-NEXT: store i8* null, i8** [[TMP24]], align 8
4782 // CHECK9-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
4783 // CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
4784 // CHECK9-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4
4785 // CHECK9-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4
4786 // CHECK9-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4787 // CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0
4788 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
4789 // CHECK9-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
4790 // CHECK9-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
4791 // CHECK9-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4792 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1
4793 // CHECK9-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64
4794 // CHECK9-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
4795 // CHECK9-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
4796 // CHECK9-NEXT: store i32 1, i32* [[TMP31]], align 4
4797 // CHECK9-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
4798 // CHECK9-NEXT: store i32 4, i32* [[TMP32]], align 4
4799 // CHECK9-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
4800 // CHECK9-NEXT: store i8** [[TMP25]], i8*** [[TMP33]], align 8
4801 // CHECK9-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
4802 // CHECK9-NEXT: store i8** [[TMP26]], i8*** [[TMP34]], align 8
4803 // CHECK9-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
4804 // CHECK9-NEXT: store i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes, i32 0, i32 0), i64** [[TMP35]], align 8
4805 // CHECK9-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
4806 // CHECK9-NEXT: store i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes, i32 0, i32 0), i64** [[TMP36]], align 8
4807 // CHECK9-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
4808 // CHECK9-NEXT: store i8** null, i8*** [[TMP37]], align 8
4809 // CHECK9-NEXT: [[TMP38:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
4810 // CHECK9-NEXT: store i8** null, i8*** [[TMP38]], align 8
4811 // CHECK9-NEXT: [[TMP39:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
4812 // CHECK9-NEXT: store i64 [[TMP30]], i64* [[TMP39]], align 8
4813 // CHECK9-NEXT: [[TMP40:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l368.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
4814 // CHECK9-NEXT: [[TMP41:%.*]] = icmp ne i32 [[TMP40]], 0
4815 // CHECK9-NEXT: br i1 [[TMP41]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
4816 // CHECK9: omp_offload.failed:
4817 // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l368(i64 [[TMP1]], double* [[TMP2]], double* [[TMP3]], double* [[TMP4]]) #[[ATTR2:[0-9]+]]
4818 // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]]
4819 // CHECK9: omp_offload.cont:
4820 // CHECK9-NEXT: [[TMP42:%.*]] = load i32, i32* [[N]], align 4
4821 // CHECK9-NEXT: [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32*
4822 // CHECK9-NEXT: store i32 [[TMP42]], i32* [[CONV4]], align 4
4823 // CHECK9-NEXT: [[TMP43:%.*]] = load i64, i64* [[N_CASTED3]], align 8
4824 // CHECK9-NEXT: [[TMP44:%.*]] = load double*, double** [[A]], align 8
4825 // CHECK9-NEXT: [[TMP45:%.*]] = load double*, double** [[B]], align 8
4826 // CHECK9-NEXT: [[TMP46:%.*]] = load double*, double** [[C]], align 8
4827 // CHECK9-NEXT: [[TMP47:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
4828 // CHECK9-NEXT: [[TMP48:%.*]] = bitcast i8** [[TMP47]] to i64*
4829 // CHECK9-NEXT: store i64 [[TMP43]], i64* [[TMP48]], align 8
4830 // CHECK9-NEXT: [[TMP49:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
4831 // CHECK9-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i64*
4832 // CHECK9-NEXT: store i64 [[TMP43]], i64* [[TMP50]], align 8
4833 // CHECK9-NEXT: [[TMP51:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0
4834 // CHECK9-NEXT: store i8* null, i8** [[TMP51]], align 8
4835 // CHECK9-NEXT: [[TMP52:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1
4836 // CHECK9-NEXT: [[TMP53:%.*]] = bitcast i8** [[TMP52]] to double**
4837 // CHECK9-NEXT: store double* [[TMP44]], double** [[TMP53]], align 8
4838 // CHECK9-NEXT: [[TMP54:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1
4839 // CHECK9-NEXT: [[TMP55:%.*]] = bitcast i8** [[TMP54]] to double**
4840 // CHECK9-NEXT: store double* [[TMP44]], double** [[TMP55]], align 8
4841 // CHECK9-NEXT: [[TMP56:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1
4842 // CHECK9-NEXT: store i8* null, i8** [[TMP56]], align 8
4843 // CHECK9-NEXT: [[TMP57:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2
4844 // CHECK9-NEXT: [[TMP58:%.*]] = bitcast i8** [[TMP57]] to double**
4845 // CHECK9-NEXT: store double* [[TMP45]], double** [[TMP58]], align 8
4846 // CHECK9-NEXT: [[TMP59:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2
4847 // CHECK9-NEXT: [[TMP60:%.*]] = bitcast i8** [[TMP59]] to double**
4848 // CHECK9-NEXT: store double* [[TMP45]], double** [[TMP60]], align 8
4849 // CHECK9-NEXT: [[TMP61:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2
4850 // CHECK9-NEXT: store i8* null, i8** [[TMP61]], align 8
4851 // CHECK9-NEXT: [[TMP62:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 3
4852 // CHECK9-NEXT: [[TMP63:%.*]] = bitcast i8** [[TMP62]] to double**
4853 // CHECK9-NEXT: store double* [[TMP46]], double** [[TMP63]], align 8
4854 // CHECK9-NEXT: [[TMP64:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 3
4855 // CHECK9-NEXT: [[TMP65:%.*]] = bitcast i8** [[TMP64]] to double**
4856 // CHECK9-NEXT: store double* [[TMP46]], double** [[TMP65]], align 8
4857 // CHECK9-NEXT: [[TMP66:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 3
4858 // CHECK9-NEXT: store i8* null, i8** [[TMP66]], align 8
4859 // CHECK9-NEXT: [[TMP67:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
4860 // CHECK9-NEXT: [[TMP68:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
4861 // CHECK9-NEXT: [[TMP69:%.*]] = load i32, i32* [[N]], align 4
4862 // CHECK9-NEXT: store i32 [[TMP69]], i32* [[DOTCAPTURE_EXPR_9]], align 4
4863 // CHECK9-NEXT: [[TMP70:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4
4864 // CHECK9-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP70]], 0
4865 // CHECK9-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1
4866 // CHECK9-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1
4867 // CHECK9-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4
4868 // CHECK9-NEXT: [[TMP71:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4
4869 // CHECK9-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP71]], 1
4870 // CHECK9-NEXT: [[TMP72:%.*]] = zext i32 [[ADD14]] to i64
4871 // CHECK9-NEXT: [[KERNEL_ARGS15:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
4872 // CHECK9-NEXT: [[TMP73:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS15]], i32 0, i32 0
4873 // CHECK9-NEXT: store i32 1, i32* [[TMP73]], align 4
4874 // CHECK9-NEXT: [[TMP74:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS15]], i32 0, i32 1
4875 // CHECK9-NEXT: store i32 4, i32* [[TMP74]], align 4
4876 // CHECK9-NEXT: [[TMP75:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS15]], i32 0, i32 2
4877 // CHECK9-NEXT: store i8** [[TMP67]], i8*** [[TMP75]], align 8
4878 // CHECK9-NEXT: [[TMP76:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS15]], i32 0, i32 3
4879 // CHECK9-NEXT: store i8** [[TMP68]], i8*** [[TMP76]], align 8
4880 // CHECK9-NEXT: [[TMP77:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS15]], i32 0, i32 4
4881 // CHECK9-NEXT: store i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.4, i32 0, i32 0), i64** [[TMP77]], align 8
4882 // CHECK9-NEXT: [[TMP78:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS15]], i32 0, i32 5
4883 // CHECK9-NEXT: store i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.5, i32 0, i32 0), i64** [[TMP78]], align 8
4884 // CHECK9-NEXT: [[TMP79:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS15]], i32 0, i32 6
4885 // CHECK9-NEXT: store i8** null, i8*** [[TMP79]], align 8
4886 // CHECK9-NEXT: [[TMP80:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS15]], i32 0, i32 7
4887 // CHECK9-NEXT: store i8** null, i8*** [[TMP80]], align 8
4888 // CHECK9-NEXT: [[TMP81:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS15]], i32 0, i32 8
4889 // CHECK9-NEXT: store i64 [[TMP72]], i64* [[TMP81]], align 8
4890 // CHECK9-NEXT: [[TMP82:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB3]], i64 -1, i32 0, i32 1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l407.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS15]])
4891 // CHECK9-NEXT: [[TMP83:%.*]] = icmp ne i32 [[TMP82]], 0
4892 // CHECK9-NEXT: br i1 [[TMP83]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]]
4893 // CHECK9: omp_offload.failed16:
4894 // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l407(i64 [[TMP43]], double* [[TMP44]], double* [[TMP45]], double* [[TMP46]]) #[[ATTR2]]
4895 // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT17]]
4896 // CHECK9: omp_offload.cont17:
4897 // CHECK9-NEXT: [[TMP84:%.*]] = load i32, i32* [[CH]], align 4
4898 // CHECK9-NEXT: [[CONV18:%.*]] = bitcast i64* [[CH_CASTED]] to i32*
4899 // CHECK9-NEXT: store i32 [[TMP84]], i32* [[CONV18]], align 4
4900 // CHECK9-NEXT: [[TMP85:%.*]] = load i64, i64* [[CH_CASTED]], align 8
4901 // CHECK9-NEXT: [[TMP86:%.*]] = load i32, i32* [[N]], align 4
4902 // CHECK9-NEXT: [[CONV20:%.*]] = bitcast i64* [[N_CASTED19]] to i32*
4903 // CHECK9-NEXT: store i32 [[TMP86]], i32* [[CONV20]], align 4
4904 // CHECK9-NEXT: [[TMP87:%.*]] = load i64, i64* [[N_CASTED19]], align 8
4905 // CHECK9-NEXT: [[TMP88:%.*]] = load double*, double** [[A]], align 8
4906 // CHECK9-NEXT: [[TMP89:%.*]] = load double*, double** [[B]], align 8
4907 // CHECK9-NEXT: [[TMP90:%.*]] = load double*, double** [[C]], align 8
4908 // CHECK9-NEXT: [[TMP91:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0
4909 // CHECK9-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to i64*
4910 // CHECK9-NEXT: store i64 [[TMP85]], i64* [[TMP92]], align 8
4911 // CHECK9-NEXT: [[TMP93:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0
4912 // CHECK9-NEXT: [[TMP94:%.*]] = bitcast i8** [[TMP93]] to i64*
4913 // CHECK9-NEXT: store i64 [[TMP85]], i64* [[TMP94]], align 8
4914 // CHECK9-NEXT: [[TMP95:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 0
4915 // CHECK9-NEXT: store i8* null, i8** [[TMP95]], align 8
4916 // CHECK9-NEXT: [[TMP96:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1
4917 // CHECK9-NEXT: [[TMP97:%.*]] = bitcast i8** [[TMP96]] to i64*
4918 // CHECK9-NEXT: store i64 [[TMP87]], i64* [[TMP97]], align 8
4919 // CHECK9-NEXT: [[TMP98:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1
4920 // CHECK9-NEXT: [[TMP99:%.*]] = bitcast i8** [[TMP98]] to i64*
4921 // CHECK9-NEXT: store i64 [[TMP87]], i64* [[TMP99]], align 8
4922 // CHECK9-NEXT: [[TMP100:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 1
4923 // CHECK9-NEXT: store i8* null, i8** [[TMP100]], align 8
4924 // CHECK9-NEXT: [[TMP101:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 2
4925 // CHECK9-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to double**
4926 // CHECK9-NEXT: store double* [[TMP88]], double** [[TMP102]], align 8
4927 // CHECK9-NEXT: [[TMP103:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 2
4928 // CHECK9-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to double**
4929 // CHECK9-NEXT: store double* [[TMP88]], double** [[TMP104]], align 8
4930 // CHECK9-NEXT: [[TMP105:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 2
4931 // CHECK9-NEXT: store i8* null, i8** [[TMP105]], align 8
4932 // CHECK9-NEXT: [[TMP106:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 3
4933 // CHECK9-NEXT: [[TMP107:%.*]] = bitcast i8** [[TMP106]] to double**
4934 // CHECK9-NEXT: store double* [[TMP89]], double** [[TMP107]], align 8
4935 // CHECK9-NEXT: [[TMP108:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 3
4936 // CHECK9-NEXT: [[TMP109:%.*]] = bitcast i8** [[TMP108]] to double**
4937 // CHECK9-NEXT: store double* [[TMP89]], double** [[TMP109]], align 8
4938 // CHECK9-NEXT: [[TMP110:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 3
4939 // CHECK9-NEXT: store i8* null, i8** [[TMP110]], align 8
4940 // CHECK9-NEXT: [[TMP111:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 4
4941 // CHECK9-NEXT: [[TMP112:%.*]] = bitcast i8** [[TMP111]] to double**
4942 // CHECK9-NEXT: store double* [[TMP90]], double** [[TMP112]], align 8
4943 // CHECK9-NEXT: [[TMP113:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 4
4944 // CHECK9-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to double**
4945 // CHECK9-NEXT: store double* [[TMP90]], double** [[TMP114]], align 8
4946 // CHECK9-NEXT: [[TMP115:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 4
4947 // CHECK9-NEXT: store i8* null, i8** [[TMP115]], align 8
4948 // CHECK9-NEXT: [[TMP116:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0
4949 // CHECK9-NEXT: [[TMP117:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0
4950 // CHECK9-NEXT: [[TMP118:%.*]] = load i32, i32* [[N]], align 4
4951 // CHECK9-NEXT: store i32 [[TMP118]], i32* [[DOTCAPTURE_EXPR_25]], align 4
4952 // CHECK9-NEXT: [[TMP119:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4
4953 // CHECK9-NEXT: [[SUB27:%.*]] = sub nsw i32 [[TMP119]], 0
4954 // CHECK9-NEXT: [[DIV28:%.*]] = sdiv i32 [[SUB27]], 1
4955 // CHECK9-NEXT: [[SUB29:%.*]] = sub nsw i32 [[DIV28]], 1
4956 // CHECK9-NEXT: store i32 [[SUB29]], i32* [[DOTCAPTURE_EXPR_26]], align 4
4957 // CHECK9-NEXT: [[TMP120:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_26]], align 4
4958 // CHECK9-NEXT: [[ADD30:%.*]] = add nsw i32 [[TMP120]], 1
4959 // CHECK9-NEXT: [[TMP121:%.*]] = zext i32 [[ADD30]] to i64
4960 // CHECK9-NEXT: [[KERNEL_ARGS31:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
4961 // CHECK9-NEXT: [[TMP122:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS31]], i32 0, i32 0
4962 // CHECK9-NEXT: store i32 1, i32* [[TMP122]], align 4
4963 // CHECK9-NEXT: [[TMP123:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS31]], i32 0, i32 1
4964 // CHECK9-NEXT: store i32 5, i32* [[TMP123]], align 4
4965 // CHECK9-NEXT: [[TMP124:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS31]], i32 0, i32 2
4966 // CHECK9-NEXT: store i8** [[TMP116]], i8*** [[TMP124]], align 8
4967 // CHECK9-NEXT: [[TMP125:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS31]], i32 0, i32 3
4968 // CHECK9-NEXT: store i8** [[TMP117]], i8*** [[TMP125]], align 8
4969 // CHECK9-NEXT: [[TMP126:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS31]], i32 0, i32 4
4970 // CHECK9-NEXT: store i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.8, i32 0, i32 0), i64** [[TMP126]], align 8
4971 // CHECK9-NEXT: [[TMP127:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS31]], i32 0, i32 5
4972 // CHECK9-NEXT: store i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.9, i32 0, i32 0), i64** [[TMP127]], align 8
4973 // CHECK9-NEXT: [[TMP128:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS31]], i32 0, i32 6
4974 // CHECK9-NEXT: store i8** null, i8*** [[TMP128]], align 8
4975 // CHECK9-NEXT: [[TMP129:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS31]], i32 0, i32 7
4976 // CHECK9-NEXT: store i8** null, i8*** [[TMP129]], align 8
4977 // CHECK9-NEXT: [[TMP130:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS31]], i32 0, i32 8
4978 // CHECK9-NEXT: store i64 [[TMP121]], i64* [[TMP130]], align 8
4979 // CHECK9-NEXT: [[TMP131:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB3]], i64 -1, i32 0, i32 1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l446.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS31]])
4980 // CHECK9-NEXT: [[TMP132:%.*]] = icmp ne i32 [[TMP131]], 0
4981 // CHECK9-NEXT: br i1 [[TMP132]], label [[OMP_OFFLOAD_FAILED32:%.*]], label [[OMP_OFFLOAD_CONT33:%.*]]
4982 // CHECK9: omp_offload.failed32:
4983 // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l446(i64 [[TMP85]], i64 [[TMP87]], double* [[TMP88]], double* [[TMP89]], double* [[TMP90]]) #[[ATTR2]]
4984 // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT33]]
4985 // CHECK9: omp_offload.cont33:
4986 // CHECK9-NEXT: [[TMP133:%.*]] = load i32, i32* [[N]], align 4
4987 // CHECK9-NEXT: [[CONV35:%.*]] = bitcast i64* [[N_CASTED34]] to i32*
4988 // CHECK9-NEXT: store i32 [[TMP133]], i32* [[CONV35]], align 4
4989 // CHECK9-NEXT: [[TMP134:%.*]] = load i64, i64* [[N_CASTED34]], align 8
4990 // CHECK9-NEXT: [[TMP135:%.*]] = load double*, double** [[A]], align 8
4991 // CHECK9-NEXT: [[TMP136:%.*]] = load double*, double** [[B]], align 8
4992 // CHECK9-NEXT: [[TMP137:%.*]] = load double*, double** [[C]], align 8
4993 // CHECK9-NEXT: [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 0
4994 // CHECK9-NEXT: [[TMP139:%.*]] = bitcast i8** [[TMP138]] to i64*
4995 // CHECK9-NEXT: store i64 [[TMP134]], i64* [[TMP139]], align 8
4996 // CHECK9-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS37]], i32 0, i32 0
4997 // CHECK9-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to i64*
4998 // CHECK9-NEXT: store i64 [[TMP134]], i64* [[TMP141]], align 8
4999 // CHECK9-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS38]], i64 0, i64 0
5000 // CHECK9-NEXT: store i8* null, i8** [[TMP142]], align 8
5001 // CHECK9-NEXT: [[TMP143:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 1
5002 // CHECK9-NEXT: [[TMP144:%.*]] = bitcast i8** [[TMP143]] to double**
5003 // CHECK9-NEXT: store double* [[TMP135]], double** [[TMP144]], align 8
5004 // CHECK9-NEXT: [[TMP145:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS37]], i32 0, i32 1
5005 // CHECK9-NEXT: [[TMP146:%.*]] = bitcast i8** [[TMP145]] to double**
5006 // CHECK9-NEXT: store double* [[TMP135]], double** [[TMP146]], align 8
5007 // CHECK9-NEXT: [[TMP147:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS38]], i64 0, i64 1
5008 // CHECK9-NEXT: store i8* null, i8** [[TMP147]], align 8
5009 // CHECK9-NEXT: [[TMP148:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 2
5010 // CHECK9-NEXT: [[TMP149:%.*]] = bitcast i8** [[TMP148]] to double**
5011 // CHECK9-NEXT: store double* [[TMP136]], double** [[TMP149]], align 8
5012 // CHECK9-NEXT: [[TMP150:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS37]], i32 0, i32 2
5013 // CHECK9-NEXT: [[TMP151:%.*]] = bitcast i8** [[TMP150]] to double**
5014 // CHECK9-NEXT: store double* [[TMP136]], double** [[TMP151]], align 8
5015 // CHECK9-NEXT: [[TMP152:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS38]], i64 0, i64 2
5016 // CHECK9-NEXT: store i8* null, i8** [[TMP152]], align 8
5017 // CHECK9-NEXT: [[TMP153:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 3
5018 // CHECK9-NEXT: [[TMP154:%.*]] = bitcast i8** [[TMP153]] to double**
5019 // CHECK9-NEXT: store double* [[TMP137]], double** [[TMP154]], align 8
5020 // CHECK9-NEXT: [[TMP155:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS37]], i32 0, i32 3
5021 // CHECK9-NEXT: [[TMP156:%.*]] = bitcast i8** [[TMP155]] to double**
5022 // CHECK9-NEXT: store double* [[TMP137]], double** [[TMP156]], align 8
5023 // CHECK9-NEXT: [[TMP157:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS38]], i64 0, i64 3
5024 // CHECK9-NEXT: store i8* null, i8** [[TMP157]], align 8
5025 // CHECK9-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 0
5026 // CHECK9-NEXT: [[TMP159:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS37]], i32 0, i32 0
5027 // CHECK9-NEXT: [[TMP160:%.*]] = load i32, i32* [[N]], align 4
5028 // CHECK9-NEXT: store i32 [[TMP160]], i32* [[DOTCAPTURE_EXPR_40]], align 4
5029 // CHECK9-NEXT: [[TMP161:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_40]], align 4
5030 // CHECK9-NEXT: [[SUB42:%.*]] = sub nsw i32 [[TMP161]], 0
5031 // CHECK9-NEXT: [[DIV43:%.*]] = sdiv i32 [[SUB42]], 1
5032 // CHECK9-NEXT: [[SUB44:%.*]] = sub nsw i32 [[DIV43]], 1
5033 // CHECK9-NEXT: store i32 [[SUB44]], i32* [[DOTCAPTURE_EXPR_41]], align 4
5034 // CHECK9-NEXT: [[TMP162:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_41]], align 4
5035 // CHECK9-NEXT: [[ADD45:%.*]] = add nsw i32 [[TMP162]], 1
5036 // CHECK9-NEXT: [[TMP163:%.*]] = zext i32 [[ADD45]] to i64
5037 // CHECK9-NEXT: [[KERNEL_ARGS46:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
5038 // CHECK9-NEXT: [[TMP164:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS46]], i32 0, i32 0
5039 // CHECK9-NEXT: store i32 1, i32* [[TMP164]], align 4
5040 // CHECK9-NEXT: [[TMP165:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS46]], i32 0, i32 1
5041 // CHECK9-NEXT: store i32 4, i32* [[TMP165]], align 4
5042 // CHECK9-NEXT: [[TMP166:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS46]], i32 0, i32 2
5043 // CHECK9-NEXT: store i8** [[TMP158]], i8*** [[TMP166]], align 8
5044 // CHECK9-NEXT: [[TMP167:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS46]], i32 0, i32 3
5045 // CHECK9-NEXT: store i8** [[TMP159]], i8*** [[TMP167]], align 8
5046 // CHECK9-NEXT: [[TMP168:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS46]], i32 0, i32 4
5047 // CHECK9-NEXT: store i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64** [[TMP168]], align 8
5048 // CHECK9-NEXT: [[TMP169:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS46]], i32 0, i32 5
5049 // CHECK9-NEXT: store i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i64** [[TMP169]], align 8
5050 // CHECK9-NEXT: [[TMP170:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS46]], i32 0, i32 6
5051 // CHECK9-NEXT: store i8** null, i8*** [[TMP170]], align 8
5052 // CHECK9-NEXT: [[TMP171:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS46]], i32 0, i32 7
5053 // CHECK9-NEXT: store i8** null, i8*** [[TMP171]], align 8
5054 // CHECK9-NEXT: [[TMP172:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS46]], i32 0, i32 8
5055 // CHECK9-NEXT: store i64 [[TMP163]], i64* [[TMP172]], align 8
5056 // CHECK9-NEXT: [[TMP173:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB3]], i64 -1, i32 0, i32 1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l477.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS46]])
5057 // CHECK9-NEXT: [[TMP174:%.*]] = icmp ne i32 [[TMP173]], 0
5058 // CHECK9-NEXT: br i1 [[TMP174]], label [[OMP_OFFLOAD_FAILED47:%.*]], label [[OMP_OFFLOAD_CONT48:%.*]]
5059 // CHECK9: omp_offload.failed47:
5060 // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l477(i64 [[TMP134]], double* [[TMP135]], double* [[TMP136]], double* [[TMP137]]) #[[ATTR2]]
5061 // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT48]]
5062 // CHECK9: omp_offload.cont48:
5063 // CHECK9-NEXT: [[TMP175:%.*]] = load i32, i32* [[CH]], align 4
5064 // CHECK9-NEXT: [[CONV50:%.*]] = bitcast i64* [[CH_CASTED49]] to i32*
5065 // CHECK9-NEXT: store i32 [[TMP175]], i32* [[CONV50]], align 4
5066 // CHECK9-NEXT: [[TMP176:%.*]] = load i64, i64* [[CH_CASTED49]], align 8
5067 // CHECK9-NEXT: [[TMP177:%.*]] = load i32, i32* [[N]], align 4
5068 // CHECK9-NEXT: [[CONV52:%.*]] = bitcast i64* [[N_CASTED51]] to i32*
5069 // CHECK9-NEXT: store i32 [[TMP177]], i32* [[CONV52]], align 4
5070 // CHECK9-NEXT: [[TMP178:%.*]] = load i64, i64* [[N_CASTED51]], align 8
5071 // CHECK9-NEXT: [[TMP179:%.*]] = load double*, double** [[A]], align 8
5072 // CHECK9-NEXT: [[TMP180:%.*]] = load double*, double** [[B]], align 8
5073 // CHECK9-NEXT: [[TMP181:%.*]] = load double*, double** [[C]], align 8
5074 // CHECK9-NEXT: [[TMP182:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 0
5075 // CHECK9-NEXT: [[TMP183:%.*]] = bitcast i8** [[TMP182]] to i64*
5076 // CHECK9-NEXT: store i64 [[TMP176]], i64* [[TMP183]], align 8
5077 // CHECK9-NEXT: [[TMP184:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 0
5078 // CHECK9-NEXT: [[TMP185:%.*]] = bitcast i8** [[TMP184]] to i64*
5079 // CHECK9-NEXT: store i64 [[TMP176]], i64* [[TMP185]], align 8
5080 // CHECK9-NEXT: [[TMP186:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS55]], i64 0, i64 0
5081 // CHECK9-NEXT: store i8* null, i8** [[TMP186]], align 8
5082 // CHECK9-NEXT: [[TMP187:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 1
5083 // CHECK9-NEXT: [[TMP188:%.*]] = bitcast i8** [[TMP187]] to i64*
5084 // CHECK9-NEXT: store i64 [[TMP178]], i64* [[TMP188]], align 8
5085 // CHECK9-NEXT: [[TMP189:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 1
5086 // CHECK9-NEXT: [[TMP190:%.*]] = bitcast i8** [[TMP189]] to i64*
5087 // CHECK9-NEXT: store i64 [[TMP178]], i64* [[TMP190]], align 8
5088 // CHECK9-NEXT: [[TMP191:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS55]], i64 0, i64 1
5089 // CHECK9-NEXT: store i8* null, i8** [[TMP191]], align 8
5090 // CHECK9-NEXT: [[TMP192:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 2
5091 // CHECK9-NEXT: [[TMP193:%.*]] = bitcast i8** [[TMP192]] to double**
5092 // CHECK9-NEXT: store double* [[TMP179]], double** [[TMP193]], align 8
5093 // CHECK9-NEXT: [[TMP194:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 2
5094 // CHECK9-NEXT: [[TMP195:%.*]] = bitcast i8** [[TMP194]] to double**
5095 // CHECK9-NEXT: store double* [[TMP179]], double** [[TMP195]], align 8
5096 // CHECK9-NEXT: [[TMP196:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS55]], i64 0, i64 2
5097 // CHECK9-NEXT: store i8* null, i8** [[TMP196]], align 8
5098 // CHECK9-NEXT: [[TMP197:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 3
5099 // CHECK9-NEXT: [[TMP198:%.*]] = bitcast i8** [[TMP197]] to double**
5100 // CHECK9-NEXT: store double* [[TMP180]], double** [[TMP198]], align 8
5101 // CHECK9-NEXT: [[TMP199:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 3
5102 // CHECK9-NEXT: [[TMP200:%.*]] = bitcast i8** [[TMP199]] to double**
5103 // CHECK9-NEXT: store double* [[TMP180]], double** [[TMP200]], align 8
5104 // CHECK9-NEXT: [[TMP201:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS55]], i64 0, i64 3
5105 // CHECK9-NEXT: store i8* null, i8** [[TMP201]], align 8
5106 // CHECK9-NEXT: [[TMP202:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 4
5107 // CHECK9-NEXT: [[TMP203:%.*]] = bitcast i8** [[TMP202]] to double**
5108 // CHECK9-NEXT: store double* [[TMP181]], double** [[TMP203]], align 8
5109 // CHECK9-NEXT: [[TMP204:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 4
5110 // CHECK9-NEXT: [[TMP205:%.*]] = bitcast i8** [[TMP204]] to double**
5111 // CHECK9-NEXT: store double* [[TMP181]], double** [[TMP205]], align 8
5112 // CHECK9-NEXT: [[TMP206:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS55]], i64 0, i64 4
5113 // CHECK9-NEXT: store i8* null, i8** [[TMP206]], align 8
5114 // CHECK9-NEXT: [[TMP207:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 0
5115 // CHECK9-NEXT: [[TMP208:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 0
5116 // CHECK9-NEXT: [[TMP209:%.*]] = load i32, i32* [[N]], align 4
5117 // CHECK9-NEXT: store i32 [[TMP209]], i32* [[DOTCAPTURE_EXPR_57]], align 4
5118 // CHECK9-NEXT: [[TMP210:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_57]], align 4
5119 // CHECK9-NEXT: [[SUB59:%.*]] = sub nsw i32 [[TMP210]], 0
5120 // CHECK9-NEXT: [[DIV60:%.*]] = sdiv i32 [[SUB59]], 1
5121 // CHECK9-NEXT: [[SUB61:%.*]] = sub nsw i32 [[DIV60]], 1
5122 // CHECK9-NEXT: store i32 [[SUB61]], i32* [[DOTCAPTURE_EXPR_58]], align 4
5123 // CHECK9-NEXT: [[TMP211:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_58]], align 4
5124 // CHECK9-NEXT: [[ADD62:%.*]] = add nsw i32 [[TMP211]], 1
5125 // CHECK9-NEXT: [[TMP212:%.*]] = zext i32 [[ADD62]] to i64
5126 // CHECK9-NEXT: [[KERNEL_ARGS63:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
5127 // CHECK9-NEXT: [[TMP213:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS63]], i32 0, i32 0
5128 // CHECK9-NEXT: store i32 1, i32* [[TMP213]], align 4
5129 // CHECK9-NEXT: [[TMP214:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS63]], i32 0, i32 1
5130 // CHECK9-NEXT: store i32 5, i32* [[TMP214]], align 4
5131 // CHECK9-NEXT: [[TMP215:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS63]], i32 0, i32 2
5132 // CHECK9-NEXT: store i8** [[TMP207]], i8*** [[TMP215]], align 8
5133 // CHECK9-NEXT: [[TMP216:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS63]], i32 0, i32 3
5134 // CHECK9-NEXT: store i8** [[TMP208]], i8*** [[TMP216]], align 8
5135 // CHECK9-NEXT: [[TMP217:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS63]], i32 0, i32 4
5136 // CHECK9-NEXT: store i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.16, i32 0, i32 0), i64** [[TMP217]], align 8
5137 // CHECK9-NEXT: [[TMP218:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS63]], i32 0, i32 5
5138 // CHECK9-NEXT: store i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.17, i32 0, i32 0), i64** [[TMP218]], align 8
5139 // CHECK9-NEXT: [[TMP219:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS63]], i32 0, i32 6
5140 // CHECK9-NEXT: store i8** null, i8*** [[TMP219]], align 8
5141 // CHECK9-NEXT: [[TMP220:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS63]], i32 0, i32 7
5142 // CHECK9-NEXT: store i8** null, i8*** [[TMP220]], align 8
5143 // CHECK9-NEXT: [[TMP221:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS63]], i32 0, i32 8
5144 // CHECK9-NEXT: store i64 [[TMP212]], i64* [[TMP221]], align 8
5145 // CHECK9-NEXT: [[TMP222:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB3]], i64 -1, i32 0, i32 1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l505.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS63]])
5146 // CHECK9-NEXT: [[TMP223:%.*]] = icmp ne i32 [[TMP222]], 0
5147 // CHECK9-NEXT: br i1 [[TMP223]], label [[OMP_OFFLOAD_FAILED64:%.*]], label [[OMP_OFFLOAD_CONT65:%.*]]
5148 // CHECK9: omp_offload.failed64:
5149 // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l505(i64 [[TMP176]], i64 [[TMP178]], double* [[TMP179]], double* [[TMP180]], double* [[TMP181]]) #[[ATTR2]]
5150 // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT65]]
5151 // CHECK9: omp_offload.cont65:
5152 // CHECK9-NEXT: [[TMP224:%.*]] = load i32, i32* [[N]], align 4
5153 // CHECK9-NEXT: [[CONV67:%.*]] = bitcast i64* [[N_CASTED66]] to i32*
5154 // CHECK9-NEXT: store i32 [[TMP224]], i32* [[CONV67]], align 4
5155 // CHECK9-NEXT: [[TMP225:%.*]] = load i64, i64* [[N_CASTED66]], align 8
5156 // CHECK9-NEXT: [[TMP226:%.*]] = load double*, double** [[A]], align 8
5157 // CHECK9-NEXT: [[TMP227:%.*]] = load double*, double** [[B]], align 8
5158 // CHECK9-NEXT: [[TMP228:%.*]] = load double*, double** [[C]], align 8
5159 // CHECK9-NEXT: [[TMP229:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS68]], i32 0, i32 0
5160 // CHECK9-NEXT: [[TMP230:%.*]] = bitcast i8** [[TMP229]] to i64*
5161 // CHECK9-NEXT: store i64 [[TMP225]], i64* [[TMP230]], align 8
5162 // CHECK9-NEXT: [[TMP231:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS69]], i32 0, i32 0
5163 // CHECK9-NEXT: [[TMP232:%.*]] = bitcast i8** [[TMP231]] to i64*
5164 // CHECK9-NEXT: store i64 [[TMP225]], i64* [[TMP232]], align 8
5165 // CHECK9-NEXT: [[TMP233:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS70]], i64 0, i64 0
5166 // CHECK9-NEXT: store i8* null, i8** [[TMP233]], align 8
5167 // CHECK9-NEXT: [[TMP234:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS68]], i32 0, i32 1
5168 // CHECK9-NEXT: [[TMP235:%.*]] = bitcast i8** [[TMP234]] to double**
5169 // CHECK9-NEXT: store double* [[TMP226]], double** [[TMP235]], align 8
5170 // CHECK9-NEXT: [[TMP236:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS69]], i32 0, i32 1
5171 // CHECK9-NEXT: [[TMP237:%.*]] = bitcast i8** [[TMP236]] to double**
5172 // CHECK9-NEXT: store double* [[TMP226]], double** [[TMP237]], align 8
5173 // CHECK9-NEXT: [[TMP238:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS70]], i64 0, i64 1
5174 // CHECK9-NEXT: store i8* null, i8** [[TMP238]], align 8
5175 // CHECK9-NEXT: [[TMP239:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS68]], i32 0, i32 2
5176 // CHECK9-NEXT: [[TMP240:%.*]] = bitcast i8** [[TMP239]] to double**
5177 // CHECK9-NEXT: store double* [[TMP227]], double** [[TMP240]], align 8
5178 // CHECK9-NEXT: [[TMP241:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS69]], i32 0, i32 2
5179 // CHECK9-NEXT: [[TMP242:%.*]] = bitcast i8** [[TMP241]] to double**
5180 // CHECK9-NEXT: store double* [[TMP227]], double** [[TMP242]], align 8
5181 // CHECK9-NEXT: [[TMP243:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS70]], i64 0, i64 2
5182 // CHECK9-NEXT: store i8* null, i8** [[TMP243]], align 8
5183 // CHECK9-NEXT: [[TMP244:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS68]], i32 0, i32 3
5184 // CHECK9-NEXT: [[TMP245:%.*]] = bitcast i8** [[TMP244]] to double**
5185 // CHECK9-NEXT: store double* [[TMP228]], double** [[TMP245]], align 8
5186 // CHECK9-NEXT: [[TMP246:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS69]], i32 0, i32 3
5187 // CHECK9-NEXT: [[TMP247:%.*]] = bitcast i8** [[TMP246]] to double**
5188 // CHECK9-NEXT: store double* [[TMP228]], double** [[TMP247]], align 8
5189 // CHECK9-NEXT: [[TMP248:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS70]], i64 0, i64 3
5190 // CHECK9-NEXT: store i8* null, i8** [[TMP248]], align 8
5191 // CHECK9-NEXT: [[TMP249:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS68]], i32 0, i32 0
5192 // CHECK9-NEXT: [[TMP250:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS69]], i32 0, i32 0
5193 // CHECK9-NEXT: [[TMP251:%.*]] = load i32, i32* [[N]], align 4
5194 // CHECK9-NEXT: store i32 [[TMP251]], i32* [[DOTCAPTURE_EXPR_72]], align 4
5195 // CHECK9-NEXT: [[TMP252:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_72]], align 4
5196 // CHECK9-NEXT: [[SUB74:%.*]] = sub nsw i32 [[TMP252]], 0
5197 // CHECK9-NEXT: [[DIV75:%.*]] = sdiv i32 [[SUB74]], 1
5198 // CHECK9-NEXT: [[SUB76:%.*]] = sub nsw i32 [[DIV75]], 1
5199 // CHECK9-NEXT: store i32 [[SUB76]], i32* [[DOTCAPTURE_EXPR_73]], align 4
5200 // CHECK9-NEXT: [[TMP253:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_73]], align 4
5201 // CHECK9-NEXT: [[ADD77:%.*]] = add nsw i32 [[TMP253]], 1
5202 // CHECK9-NEXT: [[TMP254:%.*]] = zext i32 [[ADD77]] to i64
5203 // CHECK9-NEXT: [[KERNEL_ARGS78:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
5204 // CHECK9-NEXT: [[TMP255:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS78]], i32 0, i32 0
5205 // CHECK9-NEXT: store i32 1, i32* [[TMP255]], align 4
5206 // CHECK9-NEXT: [[TMP256:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS78]], i32 0, i32 1
5207 // CHECK9-NEXT: store i32 4, i32* [[TMP256]], align 4
5208 // CHECK9-NEXT: [[TMP257:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS78]], i32 0, i32 2
5209 // CHECK9-NEXT: store i8** [[TMP249]], i8*** [[TMP257]], align 8
5210 // CHECK9-NEXT: [[TMP258:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS78]], i32 0, i32 3
5211 // CHECK9-NEXT: store i8** [[TMP250]], i8*** [[TMP258]], align 8
5212 // CHECK9-NEXT: [[TMP259:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS78]], i32 0, i32 4
5213 // CHECK9-NEXT: store i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.20, i32 0, i32 0), i64** [[TMP259]], align 8
5214 // CHECK9-NEXT: [[TMP260:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS78]], i32 0, i32 5
5215 // CHECK9-NEXT: store i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.21, i32 0, i32 0), i64** [[TMP260]], align 8
5216 // CHECK9-NEXT: [[TMP261:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS78]], i32 0, i32 6
5217 // CHECK9-NEXT: store i8** null, i8*** [[TMP261]], align 8
5218 // CHECK9-NEXT: [[TMP262:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS78]], i32 0, i32 7
5219 // CHECK9-NEXT: store i8** null, i8*** [[TMP262]], align 8
5220 // CHECK9-NEXT: [[TMP263:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS78]], i32 0, i32 8
5221 // CHECK9-NEXT: store i64 [[TMP254]], i64* [[TMP263]], align 8
5222 // CHECK9-NEXT: [[TMP264:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB3]], i64 -1, i32 0, i32 1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l535.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS78]])
5223 // CHECK9-NEXT: [[TMP265:%.*]] = icmp ne i32 [[TMP264]], 0
5224 // CHECK9-NEXT: br i1 [[TMP265]], label [[OMP_OFFLOAD_FAILED79:%.*]], label [[OMP_OFFLOAD_CONT80:%.*]]
5225 // CHECK9: omp_offload.failed79:
5226 // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l535(i64 [[TMP225]], double* [[TMP226]], double* [[TMP227]], double* [[TMP228]]) #[[ATTR2]]
5227 // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT80]]
5228 // CHECK9: omp_offload.cont80:
5229 // CHECK9-NEXT: [[TMP266:%.*]] = load i32, i32* [[CH]], align 4
5230 // CHECK9-NEXT: [[CONV82:%.*]] = bitcast i64* [[CH_CASTED81]] to i32*
5231 // CHECK9-NEXT: store i32 [[TMP266]], i32* [[CONV82]], align 4
5232 // CHECK9-NEXT: [[TMP267:%.*]] = load i64, i64* [[CH_CASTED81]], align 8
5233 // CHECK9-NEXT: [[TMP268:%.*]] = load i32, i32* [[N]], align 4
5234 // CHECK9-NEXT: [[CONV84:%.*]] = bitcast i64* [[N_CASTED83]] to i32*
5235 // CHECK9-NEXT: store i32 [[TMP268]], i32* [[CONV84]], align 4
5236 // CHECK9-NEXT: [[TMP269:%.*]] = load i64, i64* [[N_CASTED83]], align 8
5237 // CHECK9-NEXT: [[TMP270:%.*]] = load double*, double** [[A]], align 8
5238 // CHECK9-NEXT: [[TMP271:%.*]] = load double*, double** [[B]], align 8
5239 // CHECK9-NEXT: [[TMP272:%.*]] = load double*, double** [[C]], align 8
5240 // CHECK9-NEXT: [[TMP273:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS85]], i32 0, i32 0
5241 // CHECK9-NEXT: [[TMP274:%.*]] = bitcast i8** [[TMP273]] to i64*
5242 // CHECK9-NEXT: store i64 [[TMP267]], i64* [[TMP274]], align 8
5243 // CHECK9-NEXT: [[TMP275:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS86]], i32 0, i32 0
5244 // CHECK9-NEXT: [[TMP276:%.*]] = bitcast i8** [[TMP275]] to i64*
5245 // CHECK9-NEXT: store i64 [[TMP267]], i64* [[TMP276]], align 8
5246 // CHECK9-NEXT: [[TMP277:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS87]], i64 0, i64 0
5247 // CHECK9-NEXT: store i8* null, i8** [[TMP277]], align 8
5248 // CHECK9-NEXT: [[TMP278:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS85]], i32 0, i32 1
5249 // CHECK9-NEXT: [[TMP279:%.*]] = bitcast i8** [[TMP278]] to i64*
5250 // CHECK9-NEXT: store i64 [[TMP269]], i64* [[TMP279]], align 8
5251 // CHECK9-NEXT: [[TMP280:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS86]], i32 0, i32 1
5252 // CHECK9-NEXT: [[TMP281:%.*]] = bitcast i8** [[TMP280]] to i64*
5253 // CHECK9-NEXT: store i64 [[TMP269]], i64* [[TMP281]], align 8
5254 // CHECK9-NEXT: [[TMP282:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS87]], i64 0, i64 1
5255 // CHECK9-NEXT: store i8* null, i8** [[TMP282]], align 8
5256 // CHECK9-NEXT: [[TMP283:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS85]], i32 0, i32 2
5257 // CHECK9-NEXT: [[TMP284:%.*]] = bitcast i8** [[TMP283]] to double**
5258 // CHECK9-NEXT: store double* [[TMP270]], double** [[TMP284]], align 8
5259 // CHECK9-NEXT: [[TMP285:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS86]], i32 0, i32 2
5260 // CHECK9-NEXT: [[TMP286:%.*]] = bitcast i8** [[TMP285]] to double**
5261 // CHECK9-NEXT: store double* [[TMP270]], double** [[TMP286]], align 8
5262 // CHECK9-NEXT: [[TMP287:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS87]], i64 0, i64 2
5263 // CHECK9-NEXT: store i8* null, i8** [[TMP287]], align 8
5264 // CHECK9-NEXT: [[TMP288:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS85]], i32 0, i32 3
5265 // CHECK9-NEXT: [[TMP289:%.*]] = bitcast i8** [[TMP288]] to double**
5266 // CHECK9-NEXT: store double* [[TMP271]], double** [[TMP289]], align 8
5267 // CHECK9-NEXT: [[TMP290:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS86]], i32 0, i32 3
5268 // CHECK9-NEXT: [[TMP291:%.*]] = bitcast i8** [[TMP290]] to double**
5269 // CHECK9-NEXT: store double* [[TMP271]], double** [[TMP291]], align 8
5270 // CHECK9-NEXT: [[TMP292:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS87]], i64 0, i64 3
5271 // CHECK9-NEXT: store i8* null, i8** [[TMP292]], align 8
5272 // CHECK9-NEXT: [[TMP293:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS85]], i32 0, i32 4
5273 // CHECK9-NEXT: [[TMP294:%.*]] = bitcast i8** [[TMP293]] to double**
5274 // CHECK9-NEXT: store double* [[TMP272]], double** [[TMP294]], align 8
5275 // CHECK9-NEXT: [[TMP295:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS86]], i32 0, i32 4
5276 // CHECK9-NEXT: [[TMP296:%.*]] = bitcast i8** [[TMP295]] to double**
5277 // CHECK9-NEXT: store double* [[TMP272]], double** [[TMP296]], align 8
5278 // CHECK9-NEXT: [[TMP297:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS87]], i64 0, i64 4
5279 // CHECK9-NEXT: store i8* null, i8** [[TMP297]], align 8
5280 // CHECK9-NEXT: [[TMP298:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS85]], i32 0, i32 0
5281 // CHECK9-NEXT: [[TMP299:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS86]], i32 0, i32 0
5282 // CHECK9-NEXT: [[TMP300:%.*]] = load i32, i32* [[N]], align 4
5283 // CHECK9-NEXT: store i32 [[TMP300]], i32* [[DOTCAPTURE_EXPR_89]], align 4
5284 // CHECK9-NEXT: [[TMP301:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_89]], align 4
5285 // CHECK9-NEXT: [[SUB91:%.*]] = sub nsw i32 [[TMP301]], 0
5286 // CHECK9-NEXT: [[DIV92:%.*]] = sdiv i32 [[SUB91]], 1
5287 // CHECK9-NEXT: [[SUB93:%.*]] = sub nsw i32 [[DIV92]], 1
5288 // CHECK9-NEXT: store i32 [[SUB93]], i32* [[DOTCAPTURE_EXPR_90]], align 4
5289 // CHECK9-NEXT: [[TMP302:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_90]], align 4
5290 // CHECK9-NEXT: [[ADD94:%.*]] = add nsw i32 [[TMP302]], 1
5291 // CHECK9-NEXT: [[TMP303:%.*]] = zext i32 [[ADD94]] to i64
5292 // CHECK9-NEXT: [[KERNEL_ARGS95:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
5293 // CHECK9-NEXT: [[TMP304:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS95]], i32 0, i32 0
5294 // CHECK9-NEXT: store i32 1, i32* [[TMP304]], align 4
5295 // CHECK9-NEXT: [[TMP305:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS95]], i32 0, i32 1
5296 // CHECK9-NEXT: store i32 5, i32* [[TMP305]], align 4
5297 // CHECK9-NEXT: [[TMP306:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS95]], i32 0, i32 2
5298 // CHECK9-NEXT: store i8** [[TMP298]], i8*** [[TMP306]], align 8
5299 // CHECK9-NEXT: [[TMP307:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS95]], i32 0, i32 3
5300 // CHECK9-NEXT: store i8** [[TMP299]], i8*** [[TMP307]], align 8
5301 // CHECK9-NEXT: [[TMP308:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS95]], i32 0, i32 4
5302 // CHECK9-NEXT: store i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.24, i32 0, i32 0), i64** [[TMP308]], align 8
5303 // CHECK9-NEXT: [[TMP309:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS95]], i32 0, i32 5
5304 // CHECK9-NEXT: store i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.25, i32 0, i32 0), i64** [[TMP309]], align 8
5305 // CHECK9-NEXT: [[TMP310:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS95]], i32 0, i32 6
5306 // CHECK9-NEXT: store i8** null, i8*** [[TMP310]], align 8
5307 // CHECK9-NEXT: [[TMP311:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS95]], i32 0, i32 7
5308 // CHECK9-NEXT: store i8** null, i8*** [[TMP311]], align 8
5309 // CHECK9-NEXT: [[TMP312:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS95]], i32 0, i32 8
5310 // CHECK9-NEXT: store i64 [[TMP303]], i64* [[TMP312]], align 8
5311 // CHECK9-NEXT: [[TMP313:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB3]], i64 -1, i32 0, i32 1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l561.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS95]])
5312 // CHECK9-NEXT: [[TMP314:%.*]] = icmp ne i32 [[TMP313]], 0
5313 // CHECK9-NEXT: br i1 [[TMP314]], label [[OMP_OFFLOAD_FAILED96:%.*]], label [[OMP_OFFLOAD_CONT97:%.*]]
5314 // CHECK9: omp_offload.failed96:
5315 // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l561(i64 [[TMP267]], i64 [[TMP269]], double* [[TMP270]], double* [[TMP271]], double* [[TMP272]]) #[[ATTR2]]
5316 // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT97]]
5317 // CHECK9: omp_offload.cont97:
5318 // CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiET_v()
5319 // CHECK9-NEXT: ret i32 [[CALL]]
5320 //
5321 //
5322 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l368
5323 // CHECK9-SAME: (i64 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR1:[0-9]+]] {
5324 // CHECK9-NEXT: entry:
5325 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
5326 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca double*, align 8
5327 // CHECK9-NEXT: [[B_ADDR:%.*]] = alloca double*, align 8
5328 // CHECK9-NEXT: [[C_ADDR:%.*]] = alloca double*, align 8
5329 // CHECK9-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
5330 // CHECK9-NEXT: store double* [[A]], double** [[A_ADDR]], align 8
5331 // CHECK9-NEXT: store double* [[B]], double** [[B_ADDR]], align 8
5332 // CHECK9-NEXT: store double* [[C]], double** [[C_ADDR]], align 8
5333 // CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
5334 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[CONV]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
5335 // CHECK9-NEXT: ret void
5336 //
5337 //
5338 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined.
5339 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
5340 // CHECK9-NEXT: entry:
5341 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
5342 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
5343 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
5344 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca double**, align 8
5345 // CHECK9-NEXT: [[B_ADDR:%.*]] = alloca double**, align 8
5346 // CHECK9-NEXT: [[C_ADDR:%.*]] = alloca double**, align 8
5347 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
5348 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
5349 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
5350 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
5351 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
5352 // CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
5353 // CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
5354 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5355 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5356 // CHECK9-NEXT: [[I3:%.*]] = alloca i32, align 4
5357 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
5358 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
5359 // CHECK9-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
5360 // CHECK9-NEXT: store double** [[A]], double*** [[A_ADDR]], align 8
5361 // CHECK9-NEXT: store double** [[B]], double*** [[B_ADDR]], align 8
5362 // CHECK9-NEXT: store double** [[C]], double*** [[C_ADDR]], align 8
5363 // CHECK9-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
5364 // CHECK9-NEXT: [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
5365 // CHECK9-NEXT: [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
5366 // CHECK9-NEXT: [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
5367 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
5368 // CHECK9-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
5369 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5370 // CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
5371 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
5372 // CHECK9-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
5373 // CHECK9-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
5374 // CHECK9-NEXT: store i32 0, i32* [[I]], align 4
5375 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5376 // CHECK9-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
5377 // CHECK9-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
5378 // CHECK9: omp.precond.then:
5379 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
5380 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5381 // CHECK9-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
5382 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5383 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5384 // CHECK9-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
5385 // CHECK9-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
5386 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
5387 // CHECK9-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5388 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5389 // CHECK9-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
5390 // CHECK9-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5391 // CHECK9: cond.true:
5392 // CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5393 // CHECK9-NEXT: br label [[COND_END:%.*]]
5394 // CHECK9: cond.false:
5395 // CHECK9-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5396 // CHECK9-NEXT: br label [[COND_END]]
5397 // CHECK9: cond.end:
5398 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
5399 // CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
5400 // CHECK9-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
5401 // CHECK9-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
5402 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
5403 // CHECK9: omp.inner.for.cond:
5404 // CHECK9-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !17
5405 // CHECK9-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !17
5406 // CHECK9-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
5407 // CHECK9-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5408 // CHECK9: omp.inner.for.body:
5409 // CHECK9-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !17
5410 // CHECK9-NEXT: [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
5411 // CHECK9-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !17
5412 // CHECK9-NEXT: [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
5413 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !17
5414 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
5415 // CHECK9: omp.inner.for.inc:
5416 // CHECK9-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !17
5417 // CHECK9-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !17
5418 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
5419 // CHECK9-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !17
5420 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP18:![0-9]+]]
5421 // CHECK9: omp.inner.for.end:
5422 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
5423 // CHECK9: omp.loop.exit:
5424 // CHECK9-NEXT: [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
5425 // CHECK9-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
5426 // CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
5427 // CHECK9-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
5428 // CHECK9-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
5429 // CHECK9-NEXT: br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5430 // CHECK9: .omp.final.then:
5431 // CHECK9-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5432 // CHECK9-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
5433 // CHECK9-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
5434 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
5435 // CHECK9-NEXT: [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
5436 // CHECK9-NEXT: store i32 [[ADD8]], i32* [[I3]], align 4
5437 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
5438 // CHECK9: .omp.final.done:
5439 // CHECK9-NEXT: br label [[OMP_PRECOND_END]]
5440 // CHECK9: omp.precond.end:
5441 // CHECK9-NEXT: ret void
5442 //
5443 //
5444 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..1
5445 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
5446 // CHECK9-NEXT: entry:
5447 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
5448 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
5449 // CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
5450 // CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
5451 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
5452 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca double**, align 8
5453 // CHECK9-NEXT: [[B_ADDR:%.*]] = alloca double**, align 8
5454 // CHECK9-NEXT: [[C_ADDR:%.*]] = alloca double**, align 8
5455 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
5456 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
5457 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
5458 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
5459 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
5460 // CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
5461 // CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
5462 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5463 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5464 // CHECK9-NEXT: [[I4:%.*]] = alloca i32, align 4
5465 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
5466 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
5467 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
5468 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
5469 // CHECK9-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
5470 // CHECK9-NEXT: store double** [[A]], double*** [[A_ADDR]], align 8
5471 // CHECK9-NEXT: store double** [[B]], double*** [[B_ADDR]], align 8
5472 // CHECK9-NEXT: store double** [[C]], double*** [[C_ADDR]], align 8
5473 // CHECK9-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
5474 // CHECK9-NEXT: [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
5475 // CHECK9-NEXT: [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
5476 // CHECK9-NEXT: [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
5477 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
5478 // CHECK9-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
5479 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5480 // CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
5481 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
5482 // CHECK9-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
5483 // CHECK9-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
5484 // CHECK9-NEXT: store i32 0, i32* [[I]], align 4
5485 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5486 // CHECK9-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
5487 // CHECK9-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
5488 // CHECK9: omp.precond.then:
5489 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
5490 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5491 // CHECK9-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
5492 // CHECK9-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
5493 // CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
5494 // CHECK9-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
5495 // CHECK9-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
5496 // CHECK9-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
5497 // CHECK9-NEXT: store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
5498 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5499 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5500 // CHECK9-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
5501 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
5502 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
5503 // CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5504 // CHECK9-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5505 // CHECK9-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
5506 // CHECK9-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5507 // CHECK9: cond.true:
5508 // CHECK9-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5509 // CHECK9-NEXT: br label [[COND_END:%.*]]
5510 // CHECK9: cond.false:
5511 // CHECK9-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5512 // CHECK9-NEXT: br label [[COND_END]]
5513 // CHECK9: cond.end:
5514 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
5515 // CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
5516 // CHECK9-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
5517 // CHECK9-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
5518 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
5519 // CHECK9: omp.inner.for.cond:
5520 // CHECK9-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
5521 // CHECK9-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !21
5522 // CHECK9-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
5523 // CHECK9-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5524 // CHECK9: omp.inner.for.body:
5525 // CHECK9-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
5526 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
5527 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
5528 // CHECK9-NEXT: store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !21
5529 // CHECK9-NEXT: [[TMP20:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !21
5530 // CHECK9-NEXT: [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !21
5531 // CHECK9-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
5532 // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i64 [[IDXPROM]]
5533 // CHECK9-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !21
5534 // CHECK9-NEXT: [[TMP23:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !21
5535 // CHECK9-NEXT: [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !21
5536 // CHECK9-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
5537 // CHECK9-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM7]]
5538 // CHECK9-NEXT: [[TMP25:%.*]] = load double, double* [[ARRAYIDX8]], align 8, !llvm.access.group !21
5539 // CHECK9-NEXT: [[ADD9:%.*]] = fadd double [[TMP22]], [[TMP25]]
5540 // CHECK9-NEXT: [[TMP26:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !21
5541 // CHECK9-NEXT: [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !21
5542 // CHECK9-NEXT: [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
5543 // CHECK9-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM10]]
5544 // CHECK9-NEXT: store double [[ADD9]], double* [[ARRAYIDX11]], align 8, !llvm.access.group !21
5545 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
5546 // CHECK9: omp.body.continue:
5547 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
5548 // CHECK9: omp.inner.for.inc:
5549 // CHECK9-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
5550 // CHECK9-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP28]], 1
5551 // CHECK9-NEXT: store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
5552 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]]
5553 // CHECK9: omp.inner.for.end:
5554 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
5555 // CHECK9: omp.loop.exit:
5556 // CHECK9-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
5557 // CHECK9-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
5558 // CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
5559 // CHECK9-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
5560 // CHECK9-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
5561 // CHECK9-NEXT: br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5562 // CHECK9: .omp.final.then:
5563 // CHECK9-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5564 // CHECK9-NEXT: [[SUB13:%.*]] = sub nsw i32 [[TMP33]], 0
5565 // CHECK9-NEXT: [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
5566 // CHECK9-NEXT: [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
5567 // CHECK9-NEXT: [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
5568 // CHECK9-NEXT: store i32 [[ADD16]], i32* [[I4]], align 4
5569 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
5570 // CHECK9: .omp.final.done:
5571 // CHECK9-NEXT: br label [[OMP_PRECOND_END]]
5572 // CHECK9: omp.precond.end:
5573 // CHECK9-NEXT: ret void
5574 //
5575 //
5576 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l407
5577 // CHECK9-SAME: (i64 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR1]] {
5578 // CHECK9-NEXT: entry:
5579 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
5580 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca double*, align 8
5581 // CHECK9-NEXT: [[B_ADDR:%.*]] = alloca double*, align 8
5582 // CHECK9-NEXT: [[C_ADDR:%.*]] = alloca double*, align 8
5583 // CHECK9-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
5584 // CHECK9-NEXT: store double* [[A]], double** [[A_ADDR]], align 8
5585 // CHECK9-NEXT: store double* [[B]], double** [[B_ADDR]], align 8
5586 // CHECK9-NEXT: store double* [[C]], double** [[C_ADDR]], align 8
5587 // CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
5588 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32* [[CONV]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
5589 // CHECK9-NEXT: ret void
5590 //
5591 //
5592 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..2
5593 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
5594 // CHECK9-NEXT: entry:
5595 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
5596 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
5597 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
5598 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca double**, align 8
5599 // CHECK9-NEXT: [[B_ADDR:%.*]] = alloca double**, align 8
5600 // CHECK9-NEXT: [[C_ADDR:%.*]] = alloca double**, align 8
5601 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
5602 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
5603 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
5604 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
5605 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
5606 // CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
5607 // CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
5608 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5609 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5610 // CHECK9-NEXT: [[I3:%.*]] = alloca i32, align 4
5611 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
5612 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
5613 // CHECK9-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
5614 // CHECK9-NEXT: store double** [[A]], double*** [[A_ADDR]], align 8
5615 // CHECK9-NEXT: store double** [[B]], double*** [[B_ADDR]], align 8
5616 // CHECK9-NEXT: store double** [[C]], double*** [[C_ADDR]], align 8
5617 // CHECK9-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
5618 // CHECK9-NEXT: [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
5619 // CHECK9-NEXT: [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
5620 // CHECK9-NEXT: [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
5621 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
5622 // CHECK9-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
5623 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5624 // CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
5625 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
5626 // CHECK9-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
5627 // CHECK9-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
5628 // CHECK9-NEXT: store i32 0, i32* [[I]], align 4
5629 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5630 // CHECK9-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
5631 // CHECK9-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
5632 // CHECK9: omp.precond.then:
5633 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
5634 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5635 // CHECK9-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
5636 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5637 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5638 // CHECK9-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
5639 // CHECK9-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
5640 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
5641 // CHECK9-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5642 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5643 // CHECK9-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
5644 // CHECK9-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5645 // CHECK9: cond.true:
5646 // CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5647 // CHECK9-NEXT: br label [[COND_END:%.*]]
5648 // CHECK9: cond.false:
5649 // CHECK9-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5650 // CHECK9-NEXT: br label [[COND_END]]
5651 // CHECK9: cond.end:
5652 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
5653 // CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
5654 // CHECK9-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
5655 // CHECK9-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
5656 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
5657 // CHECK9: omp.inner.for.cond:
5658 // CHECK9-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
5659 // CHECK9-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26
5660 // CHECK9-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
5661 // CHECK9-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5662 // CHECK9: omp.inner.for.body:
5663 // CHECK9-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !26
5664 // CHECK9-NEXT: [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
5665 // CHECK9-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26
5666 // CHECK9-NEXT: [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
5667 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !26
5668 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
5669 // CHECK9: omp.inner.for.inc:
5670 // CHECK9-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
5671 // CHECK9-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !26
5672 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
5673 // CHECK9-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
5674 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]]
5675 // CHECK9: omp.inner.for.end:
5676 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
5677 // CHECK9: omp.loop.exit:
5678 // CHECK9-NEXT: [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
5679 // CHECK9-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
5680 // CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
5681 // CHECK9-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
5682 // CHECK9-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
5683 // CHECK9-NEXT: br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5684 // CHECK9: .omp.final.then:
5685 // CHECK9-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5686 // CHECK9-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
5687 // CHECK9-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
5688 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
5689 // CHECK9-NEXT: [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
5690 // CHECK9-NEXT: store i32 [[ADD8]], i32* [[I3]], align 4
5691 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
5692 // CHECK9: .omp.final.done:
5693 // CHECK9-NEXT: br label [[OMP_PRECOND_END]]
5694 // CHECK9: omp.precond.end:
5695 // CHECK9-NEXT: ret void
5696 //
5697 //
5698 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..3
5699 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
5700 // CHECK9-NEXT: entry:
5701 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
5702 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
5703 // CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
5704 // CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
5705 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
5706 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca double**, align 8
5707 // CHECK9-NEXT: [[B_ADDR:%.*]] = alloca double**, align 8
5708 // CHECK9-NEXT: [[C_ADDR:%.*]] = alloca double**, align 8
5709 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
5710 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
5711 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
5712 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
5713 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
5714 // CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
5715 // CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
5716 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5717 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5718 // CHECK9-NEXT: [[I4:%.*]] = alloca i32, align 4
5719 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
5720 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
5721 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
5722 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
5723 // CHECK9-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
5724 // CHECK9-NEXT: store double** [[A]], double*** [[A_ADDR]], align 8
5725 // CHECK9-NEXT: store double** [[B]], double*** [[B_ADDR]], align 8
5726 // CHECK9-NEXT: store double** [[C]], double*** [[C_ADDR]], align 8
5727 // CHECK9-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
5728 // CHECK9-NEXT: [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
5729 // CHECK9-NEXT: [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
5730 // CHECK9-NEXT: [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
5731 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
5732 // CHECK9-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
5733 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5734 // CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
5735 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
5736 // CHECK9-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
5737 // CHECK9-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
5738 // CHECK9-NEXT: store i32 0, i32* [[I]], align 4
5739 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5740 // CHECK9-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
5741 // CHECK9-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
5742 // CHECK9: omp.precond.then:
5743 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
5744 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5745 // CHECK9-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
5746 // CHECK9-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
5747 // CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
5748 // CHECK9-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
5749 // CHECK9-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
5750 // CHECK9-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
5751 // CHECK9-NEXT: store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
5752 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5753 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5754 // CHECK9-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
5755 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
5756 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
5757 // CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5758 // CHECK9-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5759 // CHECK9-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
5760 // CHECK9-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5761 // CHECK9: cond.true:
5762 // CHECK9-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5763 // CHECK9-NEXT: br label [[COND_END:%.*]]
5764 // CHECK9: cond.false:
5765 // CHECK9-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5766 // CHECK9-NEXT: br label [[COND_END]]
5767 // CHECK9: cond.end:
5768 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
5769 // CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
5770 // CHECK9-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
5771 // CHECK9-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
5772 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
5773 // CHECK9: omp.inner.for.cond:
5774 // CHECK9-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
5775 // CHECK9-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !29
5776 // CHECK9-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
5777 // CHECK9-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5778 // CHECK9: omp.inner.for.body:
5779 // CHECK9-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
5780 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
5781 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
5782 // CHECK9-NEXT: store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !29
5783 // CHECK9-NEXT: [[TMP20:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !29
5784 // CHECK9-NEXT: [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !29
5785 // CHECK9-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
5786 // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i64 [[IDXPROM]]
5787 // CHECK9-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !29
5788 // CHECK9-NEXT: [[TMP23:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !29
5789 // CHECK9-NEXT: [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !29
5790 // CHECK9-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
5791 // CHECK9-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM7]]
5792 // CHECK9-NEXT: [[TMP25:%.*]] = load double, double* [[ARRAYIDX8]], align 8, !llvm.access.group !29
5793 // CHECK9-NEXT: [[ADD9:%.*]] = fadd double [[TMP22]], [[TMP25]]
5794 // CHECK9-NEXT: [[TMP26:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !29
5795 // CHECK9-NEXT: [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !29
5796 // CHECK9-NEXT: [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
5797 // CHECK9-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM10]]
5798 // CHECK9-NEXT: store double [[ADD9]], double* [[ARRAYIDX11]], align 8, !llvm.access.group !29
5799 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
5800 // CHECK9: omp.body.continue:
5801 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
5802 // CHECK9: omp.inner.for.inc:
5803 // CHECK9-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
5804 // CHECK9-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP28]], 1
5805 // CHECK9-NEXT: store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
5806 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]]
5807 // CHECK9: omp.inner.for.end:
5808 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
5809 // CHECK9: omp.loop.exit:
5810 // CHECK9-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
5811 // CHECK9-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
5812 // CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
5813 // CHECK9-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
5814 // CHECK9-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
5815 // CHECK9-NEXT: br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5816 // CHECK9: .omp.final.then:
5817 // CHECK9-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5818 // CHECK9-NEXT: [[SUB13:%.*]] = sub nsw i32 [[TMP33]], 0
5819 // CHECK9-NEXT: [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
5820 // CHECK9-NEXT: [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
5821 // CHECK9-NEXT: [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
5822 // CHECK9-NEXT: store i32 [[ADD16]], i32* [[I4]], align 4
5823 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
5824 // CHECK9: .omp.final.done:
5825 // CHECK9-NEXT: br label [[OMP_PRECOND_END]]
5826 // CHECK9: omp.precond.end:
5827 // CHECK9-NEXT: ret void
5828 //
5829 //
5830 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l446
5831 // CHECK9-SAME: (i64 noundef [[CH:%.*]], i64 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR1]] {
5832 // CHECK9-NEXT: entry:
5833 // CHECK9-NEXT: [[CH_ADDR:%.*]] = alloca i64, align 8
5834 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
5835 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca double*, align 8
5836 // CHECK9-NEXT: [[B_ADDR:%.*]] = alloca double*, align 8
5837 // CHECK9-NEXT: [[C_ADDR:%.*]] = alloca double*, align 8
5838 // CHECK9-NEXT: store i64 [[CH]], i64* [[CH_ADDR]], align 8
5839 // CHECK9-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
5840 // CHECK9-NEXT: store double* [[A]], double** [[A_ADDR]], align 8
5841 // CHECK9-NEXT: store double* [[B]], double** [[B_ADDR]], align 8
5842 // CHECK9-NEXT: store double* [[C]], double** [[C_ADDR]], align 8
5843 // CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
5844 // CHECK9-NEXT: [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
5845 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
5846 // CHECK9-NEXT: ret void
5847 //
5848 //
5849 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..6
5850 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
5851 // CHECK9-NEXT: entry:
5852 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
5853 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
5854 // CHECK9-NEXT: [[CH_ADDR:%.*]] = alloca i32*, align 8
5855 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
5856 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca double**, align 8
5857 // CHECK9-NEXT: [[B_ADDR:%.*]] = alloca double**, align 8
5858 // CHECK9-NEXT: [[C_ADDR:%.*]] = alloca double**, align 8
5859 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
5860 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
5861 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
5862 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
5863 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
5864 // CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
5865 // CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
5866 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5867 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5868 // CHECK9-NEXT: [[I3:%.*]] = alloca i32, align 4
5869 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
5870 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
5871 // CHECK9-NEXT: store i32* [[CH]], i32** [[CH_ADDR]], align 8
5872 // CHECK9-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
5873 // CHECK9-NEXT: store double** [[A]], double*** [[A_ADDR]], align 8
5874 // CHECK9-NEXT: store double** [[B]], double*** [[B_ADDR]], align 8
5875 // CHECK9-NEXT: store double** [[C]], double*** [[C_ADDR]], align 8
5876 // CHECK9-NEXT: [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
5877 // CHECK9-NEXT: [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
5878 // CHECK9-NEXT: [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 8
5879 // CHECK9-NEXT: [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 8
5880 // CHECK9-NEXT: [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 8
5881 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP1]], align 4
5882 // CHECK9-NEXT: store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
5883 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5884 // CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP6]], 0
5885 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
5886 // CHECK9-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
5887 // CHECK9-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
5888 // CHECK9-NEXT: store i32 0, i32* [[I]], align 4
5889 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5890 // CHECK9-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP7]]
5891 // CHECK9-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
5892 // CHECK9: omp.precond.then:
5893 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
5894 // CHECK9-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5895 // CHECK9-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_COMB_UB]], align 4
5896 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5897 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5898 // CHECK9-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP0]], align 4
5899 // CHECK9-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
5900 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
5901 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP9]])
5902 // CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5903 // CHECK9-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5904 // CHECK9-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
5905 // CHECK9-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5906 // CHECK9: cond.true:
5907 // CHECK9-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5908 // CHECK9-NEXT: br label [[COND_END:%.*]]
5909 // CHECK9: cond.false:
5910 // CHECK9-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5911 // CHECK9-NEXT: br label [[COND_END]]
5912 // CHECK9: cond.end:
5913 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
5914 // CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
5915 // CHECK9-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
5916 // CHECK9-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
5917 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
5918 // CHECK9: omp.inner.for.cond:
5919 // CHECK9-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
5920 // CHECK9-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !32
5921 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP18]], 1
5922 // CHECK9-NEXT: [[CMP5:%.*]] = icmp slt i32 [[TMP17]], [[ADD]]
5923 // CHECK9-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5924 // CHECK9: omp.inner.for.body:
5925 // CHECK9-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !32
5926 // CHECK9-NEXT: [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
5927 // CHECK9-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32
5928 // CHECK9-NEXT: [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
5929 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]]), !llvm.access.group !32
5930 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
5931 // CHECK9: omp.inner.for.inc:
5932 // CHECK9-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
5933 // CHECK9-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !32
5934 // CHECK9-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
5935 // CHECK9-NEXT: store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
5936 // CHECK9-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !32
5937 // CHECK9-NEXT: [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !32
5938 // CHECK9-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
5939 // CHECK9-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !32
5940 // CHECK9-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32
5941 // CHECK9-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !32
5942 // CHECK9-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP27]], [[TMP28]]
5943 // CHECK9-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32
5944 // CHECK9-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32
5945 // CHECK9-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !32
5946 // CHECK9-NEXT: [[CMP9:%.*]] = icmp sgt i32 [[TMP29]], [[TMP30]]
5947 // CHECK9-NEXT: br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
5948 // CHECK9: cond.true10:
5949 // CHECK9-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !32
5950 // CHECK9-NEXT: br label [[COND_END12:%.*]]
5951 // CHECK9: cond.false11:
5952 // CHECK9-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32
5953 // CHECK9-NEXT: br label [[COND_END12]]
5954 // CHECK9: cond.end12:
5955 // CHECK9-NEXT: [[COND13:%.*]] = phi i32 [ [[TMP31]], [[COND_TRUE10]] ], [ [[TMP32]], [[COND_FALSE11]] ]
5956 // CHECK9-NEXT: store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32
5957 // CHECK9-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !32
5958 // CHECK9-NEXT: store i32 [[TMP33]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
5959 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]]
5960 // CHECK9: omp.inner.for.end:
5961 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
5962 // CHECK9: omp.loop.exit:
5963 // CHECK9-NEXT: [[TMP34:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
5964 // CHECK9-NEXT: [[TMP35:%.*]] = load i32, i32* [[TMP34]], align 4
5965 // CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP35]])
5966 // CHECK9-NEXT: [[TMP36:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
5967 // CHECK9-NEXT: [[TMP37:%.*]] = icmp ne i32 [[TMP36]], 0
5968 // CHECK9-NEXT: br i1 [[TMP37]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5969 // CHECK9: .omp.final.then:
5970 // CHECK9-NEXT: [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5971 // CHECK9-NEXT: [[SUB14:%.*]] = sub nsw i32 [[TMP38]], 0
5972 // CHECK9-NEXT: [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
5973 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV15]], 1
5974 // CHECK9-NEXT: [[ADD16:%.*]] = add nsw i32 0, [[MUL]]
5975 // CHECK9-NEXT: store i32 [[ADD16]], i32* [[I3]], align 4
5976 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
5977 // CHECK9: .omp.final.done:
5978 // CHECK9-NEXT: br label [[OMP_PRECOND_END]]
5979 // CHECK9: omp.precond.end:
5980 // CHECK9-NEXT: ret void
5981 //
5982 //
5983 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..7
5984 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
5985 // CHECK9-NEXT: entry:
5986 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
5987 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
5988 // CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
5989 // CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
5990 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
5991 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca double**, align 8
5992 // CHECK9-NEXT: [[B_ADDR:%.*]] = alloca double**, align 8
5993 // CHECK9-NEXT: [[C_ADDR:%.*]] = alloca double**, align 8
5994 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
5995 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
5996 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
5997 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
5998 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
5999 // CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
6000 // CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
6001 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6002 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6003 // CHECK9-NEXT: [[I4:%.*]] = alloca i32, align 4
6004 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
6005 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
6006 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
6007 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
6008 // CHECK9-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
6009 // CHECK9-NEXT: store double** [[A]], double*** [[A_ADDR]], align 8
6010 // CHECK9-NEXT: store double** [[B]], double*** [[B_ADDR]], align 8
6011 // CHECK9-NEXT: store double** [[C]], double*** [[C_ADDR]], align 8
6012 // CHECK9-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
6013 // CHECK9-NEXT: [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
6014 // CHECK9-NEXT: [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
6015 // CHECK9-NEXT: [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
6016 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
6017 // CHECK9-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
6018 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6019 // CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
6020 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
6021 // CHECK9-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
6022 // CHECK9-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
6023 // CHECK9-NEXT: store i32 0, i32* [[I]], align 4
6024 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6025 // CHECK9-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
6026 // CHECK9-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
6027 // CHECK9: omp.precond.then:
6028 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
6029 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6030 // CHECK9-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
6031 // CHECK9-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
6032 // CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
6033 // CHECK9-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
6034 // CHECK9-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
6035 // CHECK9-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
6036 // CHECK9-NEXT: store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
6037 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6038 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6039 // CHECK9-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
6040 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
6041 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
6042 // CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6043 // CHECK9-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6044 // CHECK9-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
6045 // CHECK9-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
6046 // CHECK9: cond.true:
6047 // CHECK9-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6048 // CHECK9-NEXT: br label [[COND_END:%.*]]
6049 // CHECK9: cond.false:
6050 // CHECK9-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6051 // CHECK9-NEXT: br label [[COND_END]]
6052 // CHECK9: cond.end:
6053 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
6054 // CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
6055 // CHECK9-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
6056 // CHECK9-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
6057 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
6058 // CHECK9: omp.inner.for.cond:
6059 // CHECK9-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
6060 // CHECK9-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !35
6061 // CHECK9-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
6062 // CHECK9-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6063 // CHECK9: omp.inner.for.body:
6064 // CHECK9-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
6065 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
6066 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
6067 // CHECK9-NEXT: store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !35
6068 // CHECK9-NEXT: [[TMP20:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !35
6069 // CHECK9-NEXT: [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !35
6070 // CHECK9-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
6071 // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i64 [[IDXPROM]]
6072 // CHECK9-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !35
6073 // CHECK9-NEXT: [[TMP23:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !35
6074 // CHECK9-NEXT: [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !35
6075 // CHECK9-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
6076 // CHECK9-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM7]]
6077 // CHECK9-NEXT: [[TMP25:%.*]] = load double, double* [[ARRAYIDX8]], align 8, !llvm.access.group !35
6078 // CHECK9-NEXT: [[ADD9:%.*]] = fadd double [[TMP22]], [[TMP25]]
6079 // CHECK9-NEXT: [[TMP26:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !35
6080 // CHECK9-NEXT: [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !35
6081 // CHECK9-NEXT: [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
6082 // CHECK9-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM10]]
6083 // CHECK9-NEXT: store double [[ADD9]], double* [[ARRAYIDX11]], align 8, !llvm.access.group !35
6084 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
6085 // CHECK9: omp.body.continue:
6086 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
6087 // CHECK9: omp.inner.for.inc:
6088 // CHECK9-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
6089 // CHECK9-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP28]], 1
6090 // CHECK9-NEXT: store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
6091 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]]
6092 // CHECK9: omp.inner.for.end:
6093 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
6094 // CHECK9: omp.loop.exit:
6095 // CHECK9-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
6096 // CHECK9-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
6097 // CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
6098 // CHECK9-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
6099 // CHECK9-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
6100 // CHECK9-NEXT: br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6101 // CHECK9: .omp.final.then:
6102 // CHECK9-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6103 // CHECK9-NEXT: [[SUB13:%.*]] = sub nsw i32 [[TMP33]], 0
6104 // CHECK9-NEXT: [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
6105 // CHECK9-NEXT: [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
6106 // CHECK9-NEXT: [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
6107 // CHECK9-NEXT: store i32 [[ADD16]], i32* [[I4]], align 4
6108 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
6109 // CHECK9: .omp.final.done:
6110 // CHECK9-NEXT: br label [[OMP_PRECOND_END]]
6111 // CHECK9: omp.precond.end:
6112 // CHECK9-NEXT: ret void
6113 //
6114 //
6115 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l477
6116 // CHECK9-SAME: (i64 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR1]] {
6117 // CHECK9-NEXT: entry:
6118 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
6119 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca double*, align 8
6120 // CHECK9-NEXT: [[B_ADDR:%.*]] = alloca double*, align 8
6121 // CHECK9-NEXT: [[C_ADDR:%.*]] = alloca double*, align 8
6122 // CHECK9-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
6123 // CHECK9-NEXT: store double* [[A]], double** [[A_ADDR]], align 8
6124 // CHECK9-NEXT: store double* [[B]], double** [[B_ADDR]], align 8
6125 // CHECK9-NEXT: store double* [[C]], double** [[C_ADDR]], align 8
6126 // CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
6127 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32* [[CONV]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
6128 // CHECK9-NEXT: ret void
6129 //
6130 //
6131 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..10
6132 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
6133 // CHECK9-NEXT: entry:
6134 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
6135 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
6136 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
6137 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca double**, align 8
6138 // CHECK9-NEXT: [[B_ADDR:%.*]] = alloca double**, align 8
6139 // CHECK9-NEXT: [[C_ADDR:%.*]] = alloca double**, align 8
6140 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
6141 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
6142 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
6143 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
6144 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
6145 // CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
6146 // CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
6147 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6148 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6149 // CHECK9-NEXT: [[I3:%.*]] = alloca i32, align 4
6150 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
6151 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
6152 // CHECK9-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
6153 // CHECK9-NEXT: store double** [[A]], double*** [[A_ADDR]], align 8
6154 // CHECK9-NEXT: store double** [[B]], double*** [[B_ADDR]], align 8
6155 // CHECK9-NEXT: store double** [[C]], double*** [[C_ADDR]], align 8
6156 // CHECK9-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
6157 // CHECK9-NEXT: [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
6158 // CHECK9-NEXT: [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
6159 // CHECK9-NEXT: [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
6160 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
6161 // CHECK9-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
6162 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6163 // CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
6164 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
6165 // CHECK9-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
6166 // CHECK9-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
6167 // CHECK9-NEXT: store i32 0, i32* [[I]], align 4
6168 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6169 // CHECK9-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
6170 // CHECK9-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
6171 // CHECK9: omp.precond.then:
6172 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
6173 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6174 // CHECK9-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
6175 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6176 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6177 // CHECK9-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
6178 // CHECK9-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
6179 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
6180 // CHECK9-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6181 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6182 // CHECK9-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
6183 // CHECK9-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
6184 // CHECK9: cond.true:
6185 // CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6186 // CHECK9-NEXT: br label [[COND_END:%.*]]
6187 // CHECK9: cond.false:
6188 // CHECK9-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6189 // CHECK9-NEXT: br label [[COND_END]]
6190 // CHECK9: cond.end:
6191 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
6192 // CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
6193 // CHECK9-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
6194 // CHECK9-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
6195 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
6196 // CHECK9: omp.inner.for.cond:
6197 // CHECK9-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
6198 // CHECK9-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !38
6199 // CHECK9-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
6200 // CHECK9-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6201 // CHECK9: omp.inner.for.body:
6202 // CHECK9-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !38
6203 // CHECK9-NEXT: [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
6204 // CHECK9-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !38
6205 // CHECK9-NEXT: [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
6206 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !38
6207 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
6208 // CHECK9: omp.inner.for.inc:
6209 // CHECK9-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
6210 // CHECK9-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !38
6211 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
6212 // CHECK9-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
6213 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP39:![0-9]+]]
6214 // CHECK9: omp.inner.for.end:
6215 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
6216 // CHECK9: omp.loop.exit:
6217 // CHECK9-NEXT: [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
6218 // CHECK9-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
6219 // CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
6220 // CHECK9-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
6221 // CHECK9-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
6222 // CHECK9-NEXT: br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6223 // CHECK9: .omp.final.then:
6224 // CHECK9-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6225 // CHECK9-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
6226 // CHECK9-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
6227 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
6228 // CHECK9-NEXT: [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
6229 // CHECK9-NEXT: store i32 [[ADD8]], i32* [[I3]], align 4
6230 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
6231 // CHECK9: .omp.final.done:
6232 // CHECK9-NEXT: br label [[OMP_PRECOND_END]]
6233 // CHECK9: omp.precond.end:
6234 // CHECK9-NEXT: ret void
6235 //
6236 //
6237 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..11
6238 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
6239 // CHECK9-NEXT: entry:
6240 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
6241 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
6242 // CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
6243 // CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
6244 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
6245 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca double**, align 8
6246 // CHECK9-NEXT: [[B_ADDR:%.*]] = alloca double**, align 8
6247 // CHECK9-NEXT: [[C_ADDR:%.*]] = alloca double**, align 8
6248 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
6249 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
6250 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
6251 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
6252 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
6253 // CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
6254 // CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
6255 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6256 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6257 // CHECK9-NEXT: [[I4:%.*]] = alloca i32, align 4
6258 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
6259 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
6260 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
6261 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
6262 // CHECK9-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
6263 // CHECK9-NEXT: store double** [[A]], double*** [[A_ADDR]], align 8
6264 // CHECK9-NEXT: store double** [[B]], double*** [[B_ADDR]], align 8
6265 // CHECK9-NEXT: store double** [[C]], double*** [[C_ADDR]], align 8
6266 // CHECK9-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
6267 // CHECK9-NEXT: [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
6268 // CHECK9-NEXT: [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
6269 // CHECK9-NEXT: [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
6270 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
6271 // CHECK9-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
6272 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6273 // CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
6274 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
6275 // CHECK9-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
6276 // CHECK9-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
6277 // CHECK9-NEXT: store i32 0, i32* [[I]], align 4
6278 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6279 // CHECK9-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
6280 // CHECK9-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
6281 // CHECK9: omp.precond.then:
6282 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
6283 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6284 // CHECK9-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
6285 // CHECK9-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
6286 // CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
6287 // CHECK9-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
6288 // CHECK9-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
6289 // CHECK9-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
6290 // CHECK9-NEXT: store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
6291 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6292 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6293 // CHECK9-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
6294 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
6295 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
6296 // CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6297 // CHECK9-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6298 // CHECK9-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
6299 // CHECK9-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
6300 // CHECK9: cond.true:
6301 // CHECK9-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6302 // CHECK9-NEXT: br label [[COND_END:%.*]]
6303 // CHECK9: cond.false:
6304 // CHECK9-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6305 // CHECK9-NEXT: br label [[COND_END]]
6306 // CHECK9: cond.end:
6307 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
6308 // CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
6309 // CHECK9-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
6310 // CHECK9-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
6311 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
6312 // CHECK9: omp.inner.for.cond:
6313 // CHECK9-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
6314 // CHECK9-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !41
6315 // CHECK9-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
6316 // CHECK9-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6317 // CHECK9: omp.inner.for.body:
6318 // CHECK9-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
6319 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
6320 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
6321 // CHECK9-NEXT: store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !41
6322 // CHECK9-NEXT: [[TMP20:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !41
6323 // CHECK9-NEXT: [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !41
6324 // CHECK9-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
6325 // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i64 [[IDXPROM]]
6326 // CHECK9-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !41
6327 // CHECK9-NEXT: [[TMP23:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !41
6328 // CHECK9-NEXT: [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !41
6329 // CHECK9-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
6330 // CHECK9-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM7]]
6331 // CHECK9-NEXT: [[TMP25:%.*]] = load double, double* [[ARRAYIDX8]], align 8, !llvm.access.group !41
6332 // CHECK9-NEXT: [[ADD9:%.*]] = fadd double [[TMP22]], [[TMP25]]
6333 // CHECK9-NEXT: [[TMP26:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !41
6334 // CHECK9-NEXT: [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !41
6335 // CHECK9-NEXT: [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
6336 // CHECK9-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM10]]
6337 // CHECK9-NEXT: store double [[ADD9]], double* [[ARRAYIDX11]], align 8, !llvm.access.group !41
6338 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
6339 // CHECK9: omp.body.continue:
6340 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
6341 // CHECK9: omp.inner.for.inc:
6342 // CHECK9-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
6343 // CHECK9-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP28]], 1
6344 // CHECK9-NEXT: store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
6345 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP42:![0-9]+]]
6346 // CHECK9: omp.inner.for.end:
6347 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
6348 // CHECK9: omp.loop.exit:
6349 // CHECK9-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
6350 // CHECK9-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
6351 // CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
6352 // CHECK9-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
6353 // CHECK9-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
6354 // CHECK9-NEXT: br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6355 // CHECK9: .omp.final.then:
6356 // CHECK9-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6357 // CHECK9-NEXT: [[SUB13:%.*]] = sub nsw i32 [[TMP33]], 0
6358 // CHECK9-NEXT: [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
6359 // CHECK9-NEXT: [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
6360 // CHECK9-NEXT: [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
6361 // CHECK9-NEXT: store i32 [[ADD16]], i32* [[I4]], align 4
6362 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
6363 // CHECK9: .omp.final.done:
6364 // CHECK9-NEXT: br label [[OMP_PRECOND_END]]
6365 // CHECK9: omp.precond.end:
6366 // CHECK9-NEXT: ret void
6367 //
6368 //
6369 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l505
6370 // CHECK9-SAME: (i64 noundef [[CH:%.*]], i64 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR1]] {
6371 // CHECK9-NEXT: entry:
6372 // CHECK9-NEXT: [[CH_ADDR:%.*]] = alloca i64, align 8
6373 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
6374 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca double*, align 8
6375 // CHECK9-NEXT: [[B_ADDR:%.*]] = alloca double*, align 8
6376 // CHECK9-NEXT: [[C_ADDR:%.*]] = alloca double*, align 8
6377 // CHECK9-NEXT: store i64 [[CH]], i64* [[CH_ADDR]], align 8
6378 // CHECK9-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
6379 // CHECK9-NEXT: store double* [[A]], double** [[A_ADDR]], align 8
6380 // CHECK9-NEXT: store double* [[B]], double** [[B_ADDR]], align 8
6381 // CHECK9-NEXT: store double* [[C]], double** [[C_ADDR]], align 8
6382 // CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
6383 // CHECK9-NEXT: [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
6384 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
6385 // CHECK9-NEXT: ret void
6386 //
6387 //
6388 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..14
6389 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
6390 // CHECK9-NEXT: entry:
6391 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
6392 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
6393 // CHECK9-NEXT: [[CH_ADDR:%.*]] = alloca i32*, align 8
6394 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
6395 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca double**, align 8
6396 // CHECK9-NEXT: [[B_ADDR:%.*]] = alloca double**, align 8
6397 // CHECK9-NEXT: [[C_ADDR:%.*]] = alloca double**, align 8
6398 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
6399 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
6400 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
6401 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
6402 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
6403 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
6404 // CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
6405 // CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
6406 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6407 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6408 // CHECK9-NEXT: [[I4:%.*]] = alloca i32, align 4
6409 // CHECK9-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
6410 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
6411 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
6412 // CHECK9-NEXT: store i32* [[CH]], i32** [[CH_ADDR]], align 8
6413 // CHECK9-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
6414 // CHECK9-NEXT: store double** [[A]], double*** [[A_ADDR]], align 8
6415 // CHECK9-NEXT: store double** [[B]], double*** [[B_ADDR]], align 8
6416 // CHECK9-NEXT: store double** [[C]], double*** [[C_ADDR]], align 8
6417 // CHECK9-NEXT: [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
6418 // CHECK9-NEXT: [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
6419 // CHECK9-NEXT: [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 8
6420 // CHECK9-NEXT: [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 8
6421 // CHECK9-NEXT: [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 8
6422 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
6423 // CHECK9-NEXT: store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
6424 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
6425 // CHECK9-NEXT: store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
6426 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6427 // CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
6428 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
6429 // CHECK9-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
6430 // CHECK9-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
6431 // CHECK9-NEXT: store i32 0, i32* [[I]], align 4
6432 // CHECK9-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6433 // CHECK9-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
6434 // CHECK9-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
6435 // CHECK9: omp.precond.then:
6436 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
6437 // CHECK9-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
6438 // CHECK9-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
6439 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6440 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6441 // CHECK9-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
6442 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
6443 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
6444 // CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6445 // CHECK9-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
6446 // CHECK9-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
6447 // CHECK9-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
6448 // CHECK9: cond.true:
6449 // CHECK9-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
6450 // CHECK9-NEXT: br label [[COND_END:%.*]]
6451 // CHECK9: cond.false:
6452 // CHECK9-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6453 // CHECK9-NEXT: br label [[COND_END]]
6454 // CHECK9: cond.end:
6455 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
6456 // CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
6457 // CHECK9-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
6458 // CHECK9-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
6459 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
6460 // CHECK9: omp.inner.for.cond:
6461 // CHECK9-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
6462 // CHECK9-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !44
6463 // CHECK9-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
6464 // CHECK9-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6465 // CHECK9: omp.inner.for.body:
6466 // CHECK9-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !44
6467 // CHECK9-NEXT: [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
6468 // CHECK9-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !44
6469 // CHECK9-NEXT: [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
6470 // CHECK9-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !44
6471 // CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
6472 // CHECK9-NEXT: store i32 [[TMP23]], i32* [[CONV]], align 4, !llvm.access.group !44
6473 // CHECK9-NEXT: [[TMP24:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !44
6474 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**, i64)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]], i64 [[TMP24]]), !llvm.access.group !44
6475 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
6476 // CHECK9: omp.inner.for.inc:
6477 // CHECK9-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
6478 // CHECK9-NEXT: [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !44
6479 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
6480 // CHECK9-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
6481 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP45:![0-9]+]]
6482 // CHECK9: omp.inner.for.end:
6483 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
6484 // CHECK9: omp.loop.exit:
6485 // CHECK9-NEXT: [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
6486 // CHECK9-NEXT: [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
6487 // CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP28]])
6488 // CHECK9-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
6489 // CHECK9-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
6490 // CHECK9-NEXT: br i1 [[TMP30]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6491 // CHECK9: .omp.final.then:
6492 // CHECK9-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6493 // CHECK9-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP31]], 0
6494 // CHECK9-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
6495 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
6496 // CHECK9-NEXT: [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
6497 // CHECK9-NEXT: store i32 [[ADD9]], i32* [[I4]], align 4
6498 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
6499 // CHECK9: .omp.final.done:
6500 // CHECK9-NEXT: br label [[OMP_PRECOND_END]]
6501 // CHECK9: omp.precond.end:
6502 // CHECK9-NEXT: ret void
6503 //
6504 //
6505 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..15
6506 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
6507 // CHECK9-NEXT: entry:
6508 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
6509 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
6510 // CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
6511 // CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
6512 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
6513 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca double**, align 8
6514 // CHECK9-NEXT: [[B_ADDR:%.*]] = alloca double**, align 8
6515 // CHECK9-NEXT: [[C_ADDR:%.*]] = alloca double**, align 8
6516 // CHECK9-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
6517 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
6518 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
6519 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
6520 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
6521 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
6522 // CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
6523 // CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
6524 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6525 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6526 // CHECK9-NEXT: [[I6:%.*]] = alloca i32, align 4
6527 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
6528 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
6529 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
6530 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
6531 // CHECK9-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
6532 // CHECK9-NEXT: store double** [[A]], double*** [[A_ADDR]], align 8
6533 // CHECK9-NEXT: store double** [[B]], double*** [[B_ADDR]], align 8
6534 // CHECK9-NEXT: store double** [[C]], double*** [[C_ADDR]], align 8
6535 // CHECK9-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
6536 // CHECK9-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
6537 // CHECK9-NEXT: [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
6538 // CHECK9-NEXT: [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
6539 // CHECK9-NEXT: [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
6540 // CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
6541 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
6542 // CHECK9-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
6543 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6544 // CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
6545 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
6546 // CHECK9-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
6547 // CHECK9-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
6548 // CHECK9-NEXT: store i32 0, i32* [[I]], align 4
6549 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6550 // CHECK9-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
6551 // CHECK9-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
6552 // CHECK9: omp.precond.then:
6553 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
6554 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
6555 // CHECK9-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
6556 // CHECK9-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
6557 // CHECK9-NEXT: [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
6558 // CHECK9-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
6559 // CHECK9-NEXT: [[CONV5:%.*]] = trunc i64 [[TMP9]] to i32
6560 // CHECK9-NEXT: store i32 [[CONV4]], i32* [[DOTOMP_LB]], align 4
6561 // CHECK9-NEXT: store i32 [[CONV5]], i32* [[DOTOMP_UB]], align 4
6562 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6563 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6564 // CHECK9-NEXT: [[TMP10:%.*]] = load i32, i32* [[CONV]], align 4
6565 // CHECK9-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
6566 // CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
6567 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP12]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP10]])
6568 // CHECK9-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
6569 // CHECK9: omp.dispatch.cond:
6570 // CHECK9-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6571 // CHECK9-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
6572 // CHECK9-NEXT: [[CONV7:%.*]] = trunc i64 [[TMP14]] to i32
6573 // CHECK9-NEXT: [[CMP8:%.*]] = icmp sgt i32 [[TMP13]], [[CONV7]]
6574 // CHECK9-NEXT: br i1 [[CMP8]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
6575 // CHECK9: cond.true:
6576 // CHECK9-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
6577 // CHECK9-NEXT: [[CONV9:%.*]] = trunc i64 [[TMP15]] to i32
6578 // CHECK9-NEXT: br label [[COND_END:%.*]]
6579 // CHECK9: cond.false:
6580 // CHECK9-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6581 // CHECK9-NEXT: br label [[COND_END]]
6582 // CHECK9: cond.end:
6583 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ [[CONV9]], [[COND_TRUE]] ], [ [[TMP16]], [[COND_FALSE]] ]
6584 // CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
6585 // CHECK9-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
6586 // CHECK9-NEXT: store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
6587 // CHECK9-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
6588 // CHECK9-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6589 // CHECK9-NEXT: [[CMP10:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
6590 // CHECK9-NEXT: br i1 [[CMP10]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
6591 // CHECK9: omp.dispatch.body:
6592 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
6593 // CHECK9: omp.inner.for.cond:
6594 // CHECK9-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
6595 // CHECK9-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !47
6596 // CHECK9-NEXT: [[CMP11:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
6597 // CHECK9-NEXT: br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6598 // CHECK9: omp.inner.for.body:
6599 // CHECK9-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
6600 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP22]], 1
6601 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
6602 // CHECK9-NEXT: store i32 [[ADD]], i32* [[I6]], align 4, !llvm.access.group !47
6603 // CHECK9-NEXT: [[TMP23:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !47
6604 // CHECK9-NEXT: [[TMP24:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !47
6605 // CHECK9-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP24]] to i64
6606 // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM]]
6607 // CHECK9-NEXT: [[TMP25:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !47
6608 // CHECK9-NEXT: [[TMP26:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !47
6609 // CHECK9-NEXT: [[TMP27:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !47
6610 // CHECK9-NEXT: [[IDXPROM12:%.*]] = sext i32 [[TMP27]] to i64
6611 // CHECK9-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM12]]
6612 // CHECK9-NEXT: [[TMP28:%.*]] = load double, double* [[ARRAYIDX13]], align 8, !llvm.access.group !47
6613 // CHECK9-NEXT: [[ADD14:%.*]] = fadd double [[TMP25]], [[TMP28]]
6614 // CHECK9-NEXT: [[TMP29:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !47
6615 // CHECK9-NEXT: [[TMP30:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !47
6616 // CHECK9-NEXT: [[IDXPROM15:%.*]] = sext i32 [[TMP30]] to i64
6617 // CHECK9-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds double, double* [[TMP29]], i64 [[IDXPROM15]]
6618 // CHECK9-NEXT: store double [[ADD14]], double* [[ARRAYIDX16]], align 8, !llvm.access.group !47
6619 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
6620 // CHECK9: omp.body.continue:
6621 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
6622 // CHECK9: omp.inner.for.inc:
6623 // CHECK9-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
6624 // CHECK9-NEXT: [[ADD17:%.*]] = add nsw i32 [[TMP31]], 1
6625 // CHECK9-NEXT: store i32 [[ADD17]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
6626 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP48:![0-9]+]]
6627 // CHECK9: omp.inner.for.end:
6628 // CHECK9-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
6629 // CHECK9: omp.dispatch.inc:
6630 // CHECK9-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
6631 // CHECK9-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
6632 // CHECK9-NEXT: [[ADD18:%.*]] = add nsw i32 [[TMP32]], [[TMP33]]
6633 // CHECK9-NEXT: store i32 [[ADD18]], i32* [[DOTOMP_LB]], align 4
6634 // CHECK9-NEXT: [[TMP34:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6635 // CHECK9-NEXT: [[TMP35:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
6636 // CHECK9-NEXT: [[ADD19:%.*]] = add nsw i32 [[TMP34]], [[TMP35]]
6637 // CHECK9-NEXT: store i32 [[ADD19]], i32* [[DOTOMP_UB]], align 4
6638 // CHECK9-NEXT: br label [[OMP_DISPATCH_COND]]
6639 // CHECK9: omp.dispatch.end:
6640 // CHECK9-NEXT: [[TMP36:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
6641 // CHECK9-NEXT: [[TMP37:%.*]] = load i32, i32* [[TMP36]], align 4
6642 // CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP37]])
6643 // CHECK9-NEXT: [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
6644 // CHECK9-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
6645 // CHECK9-NEXT: br i1 [[TMP39]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6646 // CHECK9: .omp.final.then:
6647 // CHECK9-NEXT: [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6648 // CHECK9-NEXT: [[SUB20:%.*]] = sub nsw i32 [[TMP40]], 0
6649 // CHECK9-NEXT: [[DIV21:%.*]] = sdiv i32 [[SUB20]], 1
6650 // CHECK9-NEXT: [[MUL22:%.*]] = mul nsw i32 [[DIV21]], 1
6651 // CHECK9-NEXT: [[ADD23:%.*]] = add nsw i32 0, [[MUL22]]
6652 // CHECK9-NEXT: store i32 [[ADD23]], i32* [[I6]], align 4
6653 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
6654 // CHECK9: .omp.final.done:
6655 // CHECK9-NEXT: br label [[OMP_PRECOND_END]]
6656 // CHECK9: omp.precond.end:
6657 // CHECK9-NEXT: ret void
6658 //
6659 //
6660 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l535
6661 // CHECK9-SAME: (i64 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR1]] {
6662 // CHECK9-NEXT: entry:
6663 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
6664 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca double*, align 8
6665 // CHECK9-NEXT: [[B_ADDR:%.*]] = alloca double*, align 8
6666 // CHECK9-NEXT: [[C_ADDR:%.*]] = alloca double*, align 8
6667 // CHECK9-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
6668 // CHECK9-NEXT: store double* [[A]], double** [[A_ADDR]], align 8
6669 // CHECK9-NEXT: store double* [[B]], double** [[B_ADDR]], align 8
6670 // CHECK9-NEXT: store double* [[C]], double** [[C_ADDR]], align 8
6671 // CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
6672 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32* [[CONV]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
6673 // CHECK9-NEXT: ret void
6674 //
6675 //
6676 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..18
6677 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
6678 // CHECK9-NEXT: entry:
6679 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
6680 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
6681 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
6682 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca double**, align 8
6683 // CHECK9-NEXT: [[B_ADDR:%.*]] = alloca double**, align 8
6684 // CHECK9-NEXT: [[C_ADDR:%.*]] = alloca double**, align 8
6685 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
6686 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
6687 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
6688 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
6689 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
6690 // CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
6691 // CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
6692 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6693 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6694 // CHECK9-NEXT: [[I3:%.*]] = alloca i32, align 4
6695 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
6696 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
6697 // CHECK9-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
6698 // CHECK9-NEXT: store double** [[A]], double*** [[A_ADDR]], align 8
6699 // CHECK9-NEXT: store double** [[B]], double*** [[B_ADDR]], align 8
6700 // CHECK9-NEXT: store double** [[C]], double*** [[C_ADDR]], align 8
6701 // CHECK9-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
6702 // CHECK9-NEXT: [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
6703 // CHECK9-NEXT: [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
6704 // CHECK9-NEXT: [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
6705 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
6706 // CHECK9-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
6707 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6708 // CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
6709 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
6710 // CHECK9-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
6711 // CHECK9-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
6712 // CHECK9-NEXT: store i32 0, i32* [[I]], align 4
6713 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6714 // CHECK9-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
6715 // CHECK9-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
6716 // CHECK9: omp.precond.then:
6717 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
6718 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6719 // CHECK9-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
6720 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6721 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6722 // CHECK9-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
6723 // CHECK9-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
6724 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
6725 // CHECK9-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6726 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6727 // CHECK9-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
6728 // CHECK9-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
6729 // CHECK9: cond.true:
6730 // CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6731 // CHECK9-NEXT: br label [[COND_END:%.*]]
6732 // CHECK9: cond.false:
6733 // CHECK9-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6734 // CHECK9-NEXT: br label [[COND_END]]
6735 // CHECK9: cond.end:
6736 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
6737 // CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
6738 // CHECK9-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
6739 // CHECK9-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
6740 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
6741 // CHECK9: omp.inner.for.cond:
6742 // CHECK9-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
6743 // CHECK9-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !50
6744 // CHECK9-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
6745 // CHECK9-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6746 // CHECK9: omp.inner.for.body:
6747 // CHECK9-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !50
6748 // CHECK9-NEXT: [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
6749 // CHECK9-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !50
6750 // CHECK9-NEXT: [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
6751 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !50
6752 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
6753 // CHECK9: omp.inner.for.inc:
6754 // CHECK9-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
6755 // CHECK9-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !50
6756 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
6757 // CHECK9-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
6758 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP51:![0-9]+]]
6759 // CHECK9: omp.inner.for.end:
6760 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
6761 // CHECK9: omp.loop.exit:
6762 // CHECK9-NEXT: [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
6763 // CHECK9-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
6764 // CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
6765 // CHECK9-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
6766 // CHECK9-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
6767 // CHECK9-NEXT: br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6768 // CHECK9: .omp.final.then:
6769 // CHECK9-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6770 // CHECK9-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
6771 // CHECK9-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
6772 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
6773 // CHECK9-NEXT: [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
6774 // CHECK9-NEXT: store i32 [[ADD8]], i32* [[I3]], align 4
6775 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
6776 // CHECK9: .omp.final.done:
6777 // CHECK9-NEXT: br label [[OMP_PRECOND_END]]
6778 // CHECK9: omp.precond.end:
6779 // CHECK9-NEXT: ret void
6780 //
6781 //
6782 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..19
6783 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
6784 // CHECK9-NEXT: entry:
6785 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
6786 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
6787 // CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
6788 // CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
6789 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
6790 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca double**, align 8
6791 // CHECK9-NEXT: [[B_ADDR:%.*]] = alloca double**, align 8
6792 // CHECK9-NEXT: [[C_ADDR:%.*]] = alloca double**, align 8
6793 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
6794 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
6795 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
6796 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
6797 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
6798 // CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
6799 // CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
6800 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6801 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6802 // CHECK9-NEXT: [[I4:%.*]] = alloca i32, align 4
6803 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
6804 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
6805 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
6806 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
6807 // CHECK9-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
6808 // CHECK9-NEXT: store double** [[A]], double*** [[A_ADDR]], align 8
6809 // CHECK9-NEXT: store double** [[B]], double*** [[B_ADDR]], align 8
6810 // CHECK9-NEXT: store double** [[C]], double*** [[C_ADDR]], align 8
6811 // CHECK9-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
6812 // CHECK9-NEXT: [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
6813 // CHECK9-NEXT: [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
6814 // CHECK9-NEXT: [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
6815 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
6816 // CHECK9-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
6817 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6818 // CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
6819 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
6820 // CHECK9-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
6821 // CHECK9-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
6822 // CHECK9-NEXT: store i32 0, i32* [[I]], align 4
6823 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6824 // CHECK9-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
6825 // CHECK9-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
6826 // CHECK9: omp.precond.then:
6827 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
6828 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6829 // CHECK9-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
6830 // CHECK9-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
6831 // CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
6832 // CHECK9-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
6833 // CHECK9-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
6834 // CHECK9-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
6835 // CHECK9-NEXT: store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
6836 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6837 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6838 // CHECK9-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
6839 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6840 // CHECK9-NEXT: [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
6841 // CHECK9-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
6842 // CHECK9-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32 35, i32 [[TMP10]], i32 [[TMP11]], i32 1, i32 1)
6843 // CHECK9-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
6844 // CHECK9: omp.dispatch.cond:
6845 // CHECK9-NEXT: [[TMP14:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
6846 // CHECK9-NEXT: [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4
6847 // CHECK9-NEXT: [[TMP16:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP15]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
6848 // CHECK9-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP16]], 0
6849 // CHECK9-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
6850 // CHECK9: omp.dispatch.body:
6851 // CHECK9-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
6852 // CHECK9-NEXT: store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
6853 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
6854 // CHECK9: omp.inner.for.cond:
6855 // CHECK9-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
6856 // CHECK9-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !53
6857 // CHECK9-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
6858 // CHECK9-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6859 // CHECK9: omp.inner.for.body:
6860 // CHECK9-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
6861 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP20]], 1
6862 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
6863 // CHECK9-NEXT: store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !53
6864 // CHECK9-NEXT: [[TMP21:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !53
6865 // CHECK9-NEXT: [[TMP22:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !53
6866 // CHECK9-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP22]] to i64
6867 // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP21]], i64 [[IDXPROM]]
6868 // CHECK9-NEXT: [[TMP23:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !53
6869 // CHECK9-NEXT: [[TMP24:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !53
6870 // CHECK9-NEXT: [[TMP25:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !53
6871 // CHECK9-NEXT: [[IDXPROM6:%.*]] = sext i32 [[TMP25]] to i64
6872 // CHECK9-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds double, double* [[TMP24]], i64 [[IDXPROM6]]
6873 // CHECK9-NEXT: [[TMP26:%.*]] = load double, double* [[ARRAYIDX7]], align 8, !llvm.access.group !53
6874 // CHECK9-NEXT: [[ADD8:%.*]] = fadd double [[TMP23]], [[TMP26]]
6875 // CHECK9-NEXT: [[TMP27:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !53
6876 // CHECK9-NEXT: [[TMP28:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !53
6877 // CHECK9-NEXT: [[IDXPROM9:%.*]] = sext i32 [[TMP28]] to i64
6878 // CHECK9-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds double, double* [[TMP27]], i64 [[IDXPROM9]]
6879 // CHECK9-NEXT: store double [[ADD8]], double* [[ARRAYIDX10]], align 8, !llvm.access.group !53
6880 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
6881 // CHECK9: omp.body.continue:
6882 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
6883 // CHECK9: omp.inner.for.inc:
6884 // CHECK9-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
6885 // CHECK9-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP29]], 1
6886 // CHECK9-NEXT: store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
6887 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP54:![0-9]+]]
6888 // CHECK9: omp.inner.for.end:
6889 // CHECK9-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
6890 // CHECK9: omp.dispatch.inc:
6891 // CHECK9-NEXT: br label [[OMP_DISPATCH_COND]]
6892 // CHECK9: omp.dispatch.end:
6893 // CHECK9-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
6894 // CHECK9-NEXT: [[TMP31:%.*]] = icmp ne i32 [[TMP30]], 0
6895 // CHECK9-NEXT: br i1 [[TMP31]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6896 // CHECK9: .omp.final.then:
6897 // CHECK9-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6898 // CHECK9-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP32]], 0
6899 // CHECK9-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1
6900 // CHECK9-NEXT: [[MUL14:%.*]] = mul nsw i32 [[DIV13]], 1
6901 // CHECK9-NEXT: [[ADD15:%.*]] = add nsw i32 0, [[MUL14]]
6902 // CHECK9-NEXT: store i32 [[ADD15]], i32* [[I4]], align 4
6903 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
6904 // CHECK9: .omp.final.done:
6905 // CHECK9-NEXT: br label [[OMP_PRECOND_END]]
6906 // CHECK9: omp.precond.end:
6907 // CHECK9-NEXT: ret void
6908 //
6909 //
6910 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l561
6911 // CHECK9-SAME: (i64 noundef [[CH:%.*]], i64 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR1]] {
6912 // CHECK9-NEXT: entry:
6913 // CHECK9-NEXT: [[CH_ADDR:%.*]] = alloca i64, align 8
6914 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
6915 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca double*, align 8
6916 // CHECK9-NEXT: [[B_ADDR:%.*]] = alloca double*, align 8
6917 // CHECK9-NEXT: [[C_ADDR:%.*]] = alloca double*, align 8
6918 // CHECK9-NEXT: store i64 [[CH]], i64* [[CH_ADDR]], align 8
6919 // CHECK9-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
6920 // CHECK9-NEXT: store double* [[A]], double** [[A_ADDR]], align 8
6921 // CHECK9-NEXT: store double* [[B]], double** [[B_ADDR]], align 8
6922 // CHECK9-NEXT: store double* [[C]], double** [[C_ADDR]], align 8
6923 // CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
6924 // CHECK9-NEXT: [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
6925 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
6926 // CHECK9-NEXT: ret void
6927 //
6928 //
6929 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..22
6930 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
6931 // CHECK9-NEXT: entry:
6932 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
6933 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
6934 // CHECK9-NEXT: [[CH_ADDR:%.*]] = alloca i32*, align 8
6935 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
6936 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca double**, align 8
6937 // CHECK9-NEXT: [[B_ADDR:%.*]] = alloca double**, align 8
6938 // CHECK9-NEXT: [[C_ADDR:%.*]] = alloca double**, align 8
6939 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
6940 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
6941 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
6942 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
6943 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
6944 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
6945 // CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
6946 // CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
6947 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6948 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6949 // CHECK9-NEXT: [[I4:%.*]] = alloca i32, align 4
6950 // CHECK9-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
6951 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
6952 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
6953 // CHECK9-NEXT: store i32* [[CH]], i32** [[CH_ADDR]], align 8
6954 // CHECK9-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
6955 // CHECK9-NEXT: store double** [[A]], double*** [[A_ADDR]], align 8
6956 // CHECK9-NEXT: store double** [[B]], double*** [[B_ADDR]], align 8
6957 // CHECK9-NEXT: store double** [[C]], double*** [[C_ADDR]], align 8
6958 // CHECK9-NEXT: [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
6959 // CHECK9-NEXT: [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
6960 // CHECK9-NEXT: [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 8
6961 // CHECK9-NEXT: [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 8
6962 // CHECK9-NEXT: [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 8
6963 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
6964 // CHECK9-NEXT: store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
6965 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
6966 // CHECK9-NEXT: store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
6967 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6968 // CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
6969 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
6970 // CHECK9-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
6971 // CHECK9-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
6972 // CHECK9-NEXT: store i32 0, i32* [[I]], align 4
6973 // CHECK9-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6974 // CHECK9-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
6975 // CHECK9-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
6976 // CHECK9: omp.precond.then:
6977 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
6978 // CHECK9-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
6979 // CHECK9-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
6980 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6981 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6982 // CHECK9-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
6983 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
6984 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
6985 // CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6986 // CHECK9-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
6987 // CHECK9-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
6988 // CHECK9-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
6989 // CHECK9: cond.true:
6990 // CHECK9-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
6991 // CHECK9-NEXT: br label [[COND_END:%.*]]
6992 // CHECK9: cond.false:
6993 // CHECK9-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6994 // CHECK9-NEXT: br label [[COND_END]]
6995 // CHECK9: cond.end:
6996 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
6997 // CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
6998 // CHECK9-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
6999 // CHECK9-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
7000 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
7001 // CHECK9: omp.inner.for.cond:
7002 // CHECK9-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !56
7003 // CHECK9-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !56
7004 // CHECK9-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
7005 // CHECK9-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7006 // CHECK9: omp.inner.for.body:
7007 // CHECK9-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !56
7008 // CHECK9-NEXT: [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
7009 // CHECK9-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !56
7010 // CHECK9-NEXT: [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
7011 // CHECK9-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !56
7012 // CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
7013 // CHECK9-NEXT: store i32 [[TMP23]], i32* [[CONV]], align 4, !llvm.access.group !56
7014 // CHECK9-NEXT: [[TMP24:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !56
7015 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**, i64)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]], i64 [[TMP24]]), !llvm.access.group !56
7016 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
7017 // CHECK9: omp.inner.for.inc:
7018 // CHECK9-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !56
7019 // CHECK9-NEXT: [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !56
7020 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
7021 // CHECK9-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !56
7022 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP57:![0-9]+]]
7023 // CHECK9: omp.inner.for.end:
7024 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
7025 // CHECK9: omp.loop.exit:
7026 // CHECK9-NEXT: [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
7027 // CHECK9-NEXT: [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
7028 // CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP28]])
7029 // CHECK9-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
7030 // CHECK9-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
7031 // CHECK9-NEXT: br i1 [[TMP30]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7032 // CHECK9: .omp.final.then:
7033 // CHECK9-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7034 // CHECK9-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP31]], 0
7035 // CHECK9-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
7036 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
7037 // CHECK9-NEXT: [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
7038 // CHECK9-NEXT: store i32 [[ADD9]], i32* [[I4]], align 4
7039 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
7040 // CHECK9: .omp.final.done:
7041 // CHECK9-NEXT: br label [[OMP_PRECOND_END]]
7042 // CHECK9: omp.precond.end:
7043 // CHECK9-NEXT: ret void
7044 //
7045 //
7046 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..23
7047 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
7048 // CHECK9-NEXT: entry:
7049 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
7050 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
7051 // CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
7052 // CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
7053 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
7054 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca double**, align 8
7055 // CHECK9-NEXT: [[B_ADDR:%.*]] = alloca double**, align 8
7056 // CHECK9-NEXT: [[C_ADDR:%.*]] = alloca double**, align 8
7057 // CHECK9-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
7058 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
7059 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
7060 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
7061 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
7062 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
7063 // CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
7064 // CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
7065 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7066 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7067 // CHECK9-NEXT: [[I6:%.*]] = alloca i32, align 4
7068 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
7069 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
7070 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
7071 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
7072 // CHECK9-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
7073 // CHECK9-NEXT: store double** [[A]], double*** [[A_ADDR]], align 8
7074 // CHECK9-NEXT: store double** [[B]], double*** [[B_ADDR]], align 8
7075 // CHECK9-NEXT: store double** [[C]], double*** [[C_ADDR]], align 8
7076 // CHECK9-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
7077 // CHECK9-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
7078 // CHECK9-NEXT: [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
7079 // CHECK9-NEXT: [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
7080 // CHECK9-NEXT: [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
7081 // CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
7082 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
7083 // CHECK9-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
7084 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7085 // CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
7086 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
7087 // CHECK9-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
7088 // CHECK9-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
7089 // CHECK9-NEXT: store i32 0, i32* [[I]], align 4
7090 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7091 // CHECK9-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
7092 // CHECK9-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
7093 // CHECK9: omp.precond.then:
7094 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
7095 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
7096 // CHECK9-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
7097 // CHECK9-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
7098 // CHECK9-NEXT: [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
7099 // CHECK9-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
7100 // CHECK9-NEXT: [[CONV5:%.*]] = trunc i64 [[TMP9]] to i32
7101 // CHECK9-NEXT: store i32 [[CONV4]], i32* [[DOTOMP_LB]], align 4
7102 // CHECK9-NEXT: store i32 [[CONV5]], i32* [[DOTOMP_UB]], align 4
7103 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
7104 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
7105 // CHECK9-NEXT: [[TMP10:%.*]] = load i32, i32* [[CONV]], align 4
7106 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
7107 // CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
7108 // CHECK9-NEXT: [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
7109 // CHECK9-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
7110 // CHECK9-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32 35, i32 [[TMP11]], i32 [[TMP12]], i32 1, i32 [[TMP10]])
7111 // CHECK9-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
7112 // CHECK9: omp.dispatch.cond:
7113 // CHECK9-NEXT: [[TMP15:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
7114 // CHECK9-NEXT: [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4
7115 // CHECK9-NEXT: [[TMP17:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP16]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
7116 // CHECK9-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP17]], 0
7117 // CHECK9-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
7118 // CHECK9: omp.dispatch.body:
7119 // CHECK9-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
7120 // CHECK9-NEXT: store i32 [[TMP18]], i32* [[DOTOMP_IV]], align 4
7121 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
7122 // CHECK9: omp.inner.for.cond:
7123 // CHECK9-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59
7124 // CHECK9-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !59
7125 // CHECK9-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
7126 // CHECK9-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7127 // CHECK9: omp.inner.for.body:
7128 // CHECK9-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59
7129 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP21]], 1
7130 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
7131 // CHECK9-NEXT: store i32 [[ADD]], i32* [[I6]], align 4, !llvm.access.group !59
7132 // CHECK9-NEXT: [[TMP22:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !59
7133 // CHECK9-NEXT: [[TMP23:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !59
7134 // CHECK9-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP23]] to i64
7135 // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP22]], i64 [[IDXPROM]]
7136 // CHECK9-NEXT: [[TMP24:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !59
7137 // CHECK9-NEXT: [[TMP25:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !59
7138 // CHECK9-NEXT: [[TMP26:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !59
7139 // CHECK9-NEXT: [[IDXPROM8:%.*]] = sext i32 [[TMP26]] to i64
7140 // CHECK9-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds double, double* [[TMP25]], i64 [[IDXPROM8]]
7141 // CHECK9-NEXT: [[TMP27:%.*]] = load double, double* [[ARRAYIDX9]], align 8, !llvm.access.group !59
7142 // CHECK9-NEXT: [[ADD10:%.*]] = fadd double [[TMP24]], [[TMP27]]
7143 // CHECK9-NEXT: [[TMP28:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !59
7144 // CHECK9-NEXT: [[TMP29:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !59
7145 // CHECK9-NEXT: [[IDXPROM11:%.*]] = sext i32 [[TMP29]] to i64
7146 // CHECK9-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds double, double* [[TMP28]], i64 [[IDXPROM11]]
7147 // CHECK9-NEXT: store double [[ADD10]], double* [[ARRAYIDX12]], align 8, !llvm.access.group !59
7148 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
7149 // CHECK9: omp.body.continue:
7150 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
7151 // CHECK9: omp.inner.for.inc:
7152 // CHECK9-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59
7153 // CHECK9-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP30]], 1
7154 // CHECK9-NEXT: store i32 [[ADD13]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59
7155 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP60:![0-9]+]]
7156 // CHECK9: omp.inner.for.end:
7157 // CHECK9-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
7158 // CHECK9: omp.dispatch.inc:
7159 // CHECK9-NEXT: br label [[OMP_DISPATCH_COND]]
7160 // CHECK9: omp.dispatch.end:
7161 // CHECK9-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
7162 // CHECK9-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
7163 // CHECK9-NEXT: br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7164 // CHECK9: .omp.final.then:
7165 // CHECK9-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7166 // CHECK9-NEXT: [[SUB14:%.*]] = sub nsw i32 [[TMP33]], 0
7167 // CHECK9-NEXT: [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
7168 // CHECK9-NEXT: [[MUL16:%.*]] = mul nsw i32 [[DIV15]], 1
7169 // CHECK9-NEXT: [[ADD17:%.*]] = add nsw i32 0, [[MUL16]]
7170 // CHECK9-NEXT: store i32 [[ADD17]], i32* [[I6]], align 4
7171 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
7172 // CHECK9: .omp.final.done:
7173 // CHECK9-NEXT: br label [[OMP_PRECOND_END]]
7174 // CHECK9: omp.precond.end:
7175 // CHECK9-NEXT: ret void
7176 //
7177 //
7178 // CHECK9-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
7179 // CHECK9-SAME: () #[[ATTR3:[0-9]+]] comdat {
7180 // CHECK9-NEXT: entry:
7181 // CHECK9-NEXT: [[A:%.*]] = alloca i32*, align 8
7182 // CHECK9-NEXT: [[B:%.*]] = alloca i32*, align 8
7183 // CHECK9-NEXT: [[C:%.*]] = alloca i32*, align 8
7184 // CHECK9-NEXT: [[N:%.*]] = alloca i32, align 4
7185 // CHECK9-NEXT: [[CH:%.*]] = alloca i32, align 4
7186 // CHECK9-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
7187 // CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 8
7188 // CHECK9-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 8
7189 // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 8
7190 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
7191 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
7192 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
7193 // CHECK9-NEXT: [[N_CASTED3:%.*]] = alloca i64, align 8
7194 // CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [4 x i8*], align 8
7195 // CHECK9-NEXT: [[DOTOFFLOAD_PTRS6:%.*]] = alloca [4 x i8*], align 8
7196 // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [4 x i8*], align 8
7197 // CHECK9-NEXT: [[_TMP8:%.*]] = alloca i32, align 4
7198 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4
7199 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4
7200 // CHECK9-NEXT: [[CH_CASTED:%.*]] = alloca i64, align 8
7201 // CHECK9-NEXT: [[N_CASTED19:%.*]] = alloca i64, align 8
7202 // CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS21:%.*]] = alloca [5 x i8*], align 8
7203 // CHECK9-NEXT: [[DOTOFFLOAD_PTRS22:%.*]] = alloca [5 x i8*], align 8
7204 // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS23:%.*]] = alloca [5 x i8*], align 8
7205 // CHECK9-NEXT: [[_TMP24:%.*]] = alloca i32, align 4
7206 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4
7207 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_26:%.*]] = alloca i32, align 4
7208 // CHECK9-NEXT: [[N_CASTED34:%.*]] = alloca i64, align 8
7209 // CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS36:%.*]] = alloca [4 x i8*], align 8
7210 // CHECK9-NEXT: [[DOTOFFLOAD_PTRS37:%.*]] = alloca [4 x i8*], align 8
7211 // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS38:%.*]] = alloca [4 x i8*], align 8
7212 // CHECK9-NEXT: [[_TMP39:%.*]] = alloca i32, align 4
7213 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_40:%.*]] = alloca i32, align 4
7214 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_41:%.*]] = alloca i32, align 4
7215 // CHECK9-NEXT: [[CH_CASTED49:%.*]] = alloca i64, align 8
7216 // CHECK9-NEXT: [[N_CASTED51:%.*]] = alloca i64, align 8
7217 // CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS53:%.*]] = alloca [5 x i8*], align 8
7218 // CHECK9-NEXT: [[DOTOFFLOAD_PTRS54:%.*]] = alloca [5 x i8*], align 8
7219 // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS55:%.*]] = alloca [5 x i8*], align 8
7220 // CHECK9-NEXT: [[_TMP56:%.*]] = alloca i32, align 4
7221 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_57:%.*]] = alloca i32, align 4
7222 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_58:%.*]] = alloca i32, align 4
7223 // CHECK9-NEXT: [[N_CASTED66:%.*]] = alloca i64, align 8
7224 // CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS68:%.*]] = alloca [4 x i8*], align 8
7225 // CHECK9-NEXT: [[DOTOFFLOAD_PTRS69:%.*]] = alloca [4 x i8*], align 8
7226 // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS70:%.*]] = alloca [4 x i8*], align 8
7227 // CHECK9-NEXT: [[_TMP71:%.*]] = alloca i32, align 4
7228 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_72:%.*]] = alloca i32, align 4
7229 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_73:%.*]] = alloca i32, align 4
7230 // CHECK9-NEXT: [[CH_CASTED81:%.*]] = alloca i64, align 8
7231 // CHECK9-NEXT: [[N_CASTED83:%.*]] = alloca i64, align 8
7232 // CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS85:%.*]] = alloca [5 x i8*], align 8
7233 // CHECK9-NEXT: [[DOTOFFLOAD_PTRS86:%.*]] = alloca [5 x i8*], align 8
7234 // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS87:%.*]] = alloca [5 x i8*], align 8
7235 // CHECK9-NEXT: [[_TMP88:%.*]] = alloca i32, align 4
7236 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_89:%.*]] = alloca i32, align 4
7237 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_90:%.*]] = alloca i32, align 4
7238 // CHECK9-NEXT: store i32 10000, i32* [[N]], align 4
7239 // CHECK9-NEXT: store i32 100, i32* [[CH]], align 4
7240 // CHECK9-NEXT: [[TMP0:%.*]] = load i32, i32* [[N]], align 4
7241 // CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[N_CASTED]] to i32*
7242 // CHECK9-NEXT: store i32 [[TMP0]], i32* [[CONV]], align 4
7243 // CHECK9-NEXT: [[TMP1:%.*]] = load i64, i64* [[N_CASTED]], align 8
7244 // CHECK9-NEXT: [[TMP2:%.*]] = load i32*, i32** [[A]], align 8
7245 // CHECK9-NEXT: [[TMP3:%.*]] = load i32*, i32** [[B]], align 8
7246 // CHECK9-NEXT: [[TMP4:%.*]] = load i32*, i32** [[C]], align 8
7247 // CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
7248 // CHECK9-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i64*
7249 // CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP6]], align 8
7250 // CHECK9-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
7251 // CHECK9-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64*
7252 // CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP8]], align 8
7253 // CHECK9-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
7254 // CHECK9-NEXT: store i8* null, i8** [[TMP9]], align 8
7255 // CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
7256 // CHECK9-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i32**
7257 // CHECK9-NEXT: store i32* [[TMP2]], i32** [[TMP11]], align 8
7258 // CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
7259 // CHECK9-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32**
7260 // CHECK9-NEXT: store i32* [[TMP2]], i32** [[TMP13]], align 8
7261 // CHECK9-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
7262 // CHECK9-NEXT: store i8* null, i8** [[TMP14]], align 8
7263 // CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
7264 // CHECK9-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32**
7265 // CHECK9-NEXT: store i32* [[TMP3]], i32** [[TMP16]], align 8
7266 // CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
7267 // CHECK9-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32**
7268 // CHECK9-NEXT: store i32* [[TMP3]], i32** [[TMP18]], align 8
7269 // CHECK9-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
7270 // CHECK9-NEXT: store i8* null, i8** [[TMP19]], align 8
7271 // CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
7272 // CHECK9-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32**
7273 // CHECK9-NEXT: store i32* [[TMP4]], i32** [[TMP21]], align 8
7274 // CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
7275 // CHECK9-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32**
7276 // CHECK9-NEXT: store i32* [[TMP4]], i32** [[TMP23]], align 8
7277 // CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
7278 // CHECK9-NEXT: store i8* null, i8** [[TMP24]], align 8
7279 // CHECK9-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
7280 // CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
7281 // CHECK9-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4
7282 // CHECK9-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4
7283 // CHECK9-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7284 // CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0
7285 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
7286 // CHECK9-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
7287 // CHECK9-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
7288 // CHECK9-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7289 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1
7290 // CHECK9-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64
7291 // CHECK9-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
7292 // CHECK9-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
7293 // CHECK9-NEXT: store i32 1, i32* [[TMP31]], align 4
7294 // CHECK9-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
7295 // CHECK9-NEXT: store i32 4, i32* [[TMP32]], align 4
7296 // CHECK9-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
7297 // CHECK9-NEXT: store i8** [[TMP25]], i8*** [[TMP33]], align 8
7298 // CHECK9-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
7299 // CHECK9-NEXT: store i8** [[TMP26]], i8*** [[TMP34]], align 8
7300 // CHECK9-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
7301 // CHECK9-NEXT: store i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.28, i32 0, i32 0), i64** [[TMP35]], align 8
7302 // CHECK9-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
7303 // CHECK9-NEXT: store i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.29, i32 0, i32 0), i64** [[TMP36]], align 8
7304 // CHECK9-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
7305 // CHECK9-NEXT: store i8** null, i8*** [[TMP37]], align 8
7306 // CHECK9-NEXT: [[TMP38:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
7307 // CHECK9-NEXT: store i8** null, i8*** [[TMP38]], align 8
7308 // CHECK9-NEXT: [[TMP39:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
7309 // CHECK9-NEXT: store i64 [[TMP30]], i64* [[TMP39]], align 8
7310 // CHECK9-NEXT: [[TMP40:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB3]], i64 -1, i32 0, i32 1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l42.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
7311 // CHECK9-NEXT: [[TMP41:%.*]] = icmp ne i32 [[TMP40]], 0
7312 // CHECK9-NEXT: br i1 [[TMP41]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
7313 // CHECK9: omp_offload.failed:
7314 // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l42(i64 [[TMP1]], i32* [[TMP2]], i32* [[TMP3]], i32* [[TMP4]]) #[[ATTR2]]
7315 // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]]
7316 // CHECK9: omp_offload.cont:
7317 // CHECK9-NEXT: [[TMP42:%.*]] = load i32, i32* [[N]], align 4
7318 // CHECK9-NEXT: [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32*
7319 // CHECK9-NEXT: store i32 [[TMP42]], i32* [[CONV4]], align 4
7320 // CHECK9-NEXT: [[TMP43:%.*]] = load i64, i64* [[N_CASTED3]], align 8
7321 // CHECK9-NEXT: [[TMP44:%.*]] = load i32*, i32** [[A]], align 8
7322 // CHECK9-NEXT: [[TMP45:%.*]] = load i32*, i32** [[B]], align 8
7323 // CHECK9-NEXT: [[TMP46:%.*]] = load i32*, i32** [[C]], align 8
7324 // CHECK9-NEXT: [[TMP47:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
7325 // CHECK9-NEXT: [[TMP48:%.*]] = bitcast i8** [[TMP47]] to i64*
7326 // CHECK9-NEXT: store i64 [[TMP43]], i64* [[TMP48]], align 8
7327 // CHECK9-NEXT: [[TMP49:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
7328 // CHECK9-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i64*
7329 // CHECK9-NEXT: store i64 [[TMP43]], i64* [[TMP50]], align 8
7330 // CHECK9-NEXT: [[TMP51:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0
7331 // CHECK9-NEXT: store i8* null, i8** [[TMP51]], align 8
7332 // CHECK9-NEXT: [[TMP52:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1
7333 // CHECK9-NEXT: [[TMP53:%.*]] = bitcast i8** [[TMP52]] to i32**
7334 // CHECK9-NEXT: store i32* [[TMP44]], i32** [[TMP53]], align 8
7335 // CHECK9-NEXT: [[TMP54:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1
7336 // CHECK9-NEXT: [[TMP55:%.*]] = bitcast i8** [[TMP54]] to i32**
7337 // CHECK9-NEXT: store i32* [[TMP44]], i32** [[TMP55]], align 8
7338 // CHECK9-NEXT: [[TMP56:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1
7339 // CHECK9-NEXT: store i8* null, i8** [[TMP56]], align 8
7340 // CHECK9-NEXT: [[TMP57:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2
7341 // CHECK9-NEXT: [[TMP58:%.*]] = bitcast i8** [[TMP57]] to i32**
7342 // CHECK9-NEXT: store i32* [[TMP45]], i32** [[TMP58]], align 8
7343 // CHECK9-NEXT: [[TMP59:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2
7344 // CHECK9-NEXT: [[TMP60:%.*]] = bitcast i8** [[TMP59]] to i32**
7345 // CHECK9-NEXT: store i32* [[TMP45]], i32** [[TMP60]], align 8
7346 // CHECK9-NEXT: [[TMP61:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2
7347 // CHECK9-NEXT: store i8* null, i8** [[TMP61]], align 8
7348 // CHECK9-NEXT: [[TMP62:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 3
7349 // CHECK9-NEXT: [[TMP63:%.*]] = bitcast i8** [[TMP62]] to i32**
7350 // CHECK9-NEXT: store i32* [[TMP46]], i32** [[TMP63]], align 8
7351 // CHECK9-NEXT: [[TMP64:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 3
7352 // CHECK9-NEXT: [[TMP65:%.*]] = bitcast i8** [[TMP64]] to i32**
7353 // CHECK9-NEXT: store i32* [[TMP46]], i32** [[TMP65]], align 8
7354 // CHECK9-NEXT: [[TMP66:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 3
7355 // CHECK9-NEXT: store i8* null, i8** [[TMP66]], align 8
7356 // CHECK9-NEXT: [[TMP67:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
7357 // CHECK9-NEXT: [[TMP68:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
7358 // CHECK9-NEXT: [[TMP69:%.*]] = load i32, i32* [[N]], align 4
7359 // CHECK9-NEXT: store i32 [[TMP69]], i32* [[DOTCAPTURE_EXPR_9]], align 4
7360 // CHECK9-NEXT: [[TMP70:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4
7361 // CHECK9-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP70]], 0
7362 // CHECK9-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1
7363 // CHECK9-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1
7364 // CHECK9-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4
7365 // CHECK9-NEXT: [[TMP71:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4
7366 // CHECK9-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP71]], 1
7367 // CHECK9-NEXT: [[TMP72:%.*]] = zext i32 [[ADD14]] to i64
7368 // CHECK9-NEXT: [[KERNEL_ARGS15:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
7369 // CHECK9-NEXT: [[TMP73:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS15]], i32 0, i32 0
7370 // CHECK9-NEXT: store i32 1, i32* [[TMP73]], align 4
7371 // CHECK9-NEXT: [[TMP74:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS15]], i32 0, i32 1
7372 // CHECK9-NEXT: store i32 4, i32* [[TMP74]], align 4
7373 // CHECK9-NEXT: [[TMP75:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS15]], i32 0, i32 2
7374 // CHECK9-NEXT: store i8** [[TMP67]], i8*** [[TMP75]], align 8
7375 // CHECK9-NEXT: [[TMP76:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS15]], i32 0, i32 3
7376 // CHECK9-NEXT: store i8** [[TMP68]], i8*** [[TMP76]], align 8
7377 // CHECK9-NEXT: [[TMP77:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS15]], i32 0, i32 4
7378 // CHECK9-NEXT: store i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.32, i32 0, i32 0), i64** [[TMP77]], align 8
7379 // CHECK9-NEXT: [[TMP78:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS15]], i32 0, i32 5
7380 // CHECK9-NEXT: store i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.33, i32 0, i32 0), i64** [[TMP78]], align 8
7381 // CHECK9-NEXT: [[TMP79:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS15]], i32 0, i32 6
7382 // CHECK9-NEXT: store i8** null, i8*** [[TMP79]], align 8
7383 // CHECK9-NEXT: [[TMP80:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS15]], i32 0, i32 7
7384 // CHECK9-NEXT: store i8** null, i8*** [[TMP80]], align 8
7385 // CHECK9-NEXT: [[TMP81:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS15]], i32 0, i32 8
7386 // CHECK9-NEXT: store i64 [[TMP72]], i64* [[TMP81]], align 8
7387 // CHECK9-NEXT: [[TMP82:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB3]], i64 -1, i32 0, i32 1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l50.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS15]])
7388 // CHECK9-NEXT: [[TMP83:%.*]] = icmp ne i32 [[TMP82]], 0
7389 // CHECK9-NEXT: br i1 [[TMP83]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]]
7390 // CHECK9: omp_offload.failed16:
7391 // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l50(i64 [[TMP43]], i32* [[TMP44]], i32* [[TMP45]], i32* [[TMP46]]) #[[ATTR2]]
7392 // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT17]]
7393 // CHECK9: omp_offload.cont17:
7394 // CHECK9-NEXT: [[TMP84:%.*]] = load i32, i32* [[CH]], align 4
7395 // CHECK9-NEXT: [[CONV18:%.*]] = bitcast i64* [[CH_CASTED]] to i32*
7396 // CHECK9-NEXT: store i32 [[TMP84]], i32* [[CONV18]], align 4
7397 // CHECK9-NEXT: [[TMP85:%.*]] = load i64, i64* [[CH_CASTED]], align 8
7398 // CHECK9-NEXT: [[TMP86:%.*]] = load i32, i32* [[N]], align 4
7399 // CHECK9-NEXT: [[CONV20:%.*]] = bitcast i64* [[N_CASTED19]] to i32*
7400 // CHECK9-NEXT: store i32 [[TMP86]], i32* [[CONV20]], align 4
7401 // CHECK9-NEXT: [[TMP87:%.*]] = load i64, i64* [[N_CASTED19]], align 8
7402 // CHECK9-NEXT: [[TMP88:%.*]] = load i32*, i32** [[A]], align 8
7403 // CHECK9-NEXT: [[TMP89:%.*]] = load i32*, i32** [[B]], align 8
7404 // CHECK9-NEXT: [[TMP90:%.*]] = load i32*, i32** [[C]], align 8
7405 // CHECK9-NEXT: [[TMP91:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0
7406 // CHECK9-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to i64*
7407 // CHECK9-NEXT: store i64 [[TMP85]], i64* [[TMP92]], align 8
7408 // CHECK9-NEXT: [[TMP93:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0
7409 // CHECK9-NEXT: [[TMP94:%.*]] = bitcast i8** [[TMP93]] to i64*
7410 // CHECK9-NEXT: store i64 [[TMP85]], i64* [[TMP94]], align 8
7411 // CHECK9-NEXT: [[TMP95:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 0
7412 // CHECK9-NEXT: store i8* null, i8** [[TMP95]], align 8
7413 // CHECK9-NEXT: [[TMP96:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1
7414 // CHECK9-NEXT: [[TMP97:%.*]] = bitcast i8** [[TMP96]] to i64*
7415 // CHECK9-NEXT: store i64 [[TMP87]], i64* [[TMP97]], align 8
7416 // CHECK9-NEXT: [[TMP98:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1
7417 // CHECK9-NEXT: [[TMP99:%.*]] = bitcast i8** [[TMP98]] to i64*
7418 // CHECK9-NEXT: store i64 [[TMP87]], i64* [[TMP99]], align 8
7419 // CHECK9-NEXT: [[TMP100:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 1
7420 // CHECK9-NEXT: store i8* null, i8** [[TMP100]], align 8
7421 // CHECK9-NEXT: [[TMP101:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 2
7422 // CHECK9-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to i32**
7423 // CHECK9-NEXT: store i32* [[TMP88]], i32** [[TMP102]], align 8
7424 // CHECK9-NEXT: [[TMP103:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 2
7425 // CHECK9-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to i32**
7426 // CHECK9-NEXT: store i32* [[TMP88]], i32** [[TMP104]], align 8
7427 // CHECK9-NEXT: [[TMP105:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 2
7428 // CHECK9-NEXT: store i8* null, i8** [[TMP105]], align 8
7429 // CHECK9-NEXT: [[TMP106:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 3
7430 // CHECK9-NEXT: [[TMP107:%.*]] = bitcast i8** [[TMP106]] to i32**
7431 // CHECK9-NEXT: store i32* [[TMP89]], i32** [[TMP107]], align 8
7432 // CHECK9-NEXT: [[TMP108:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 3
7433 // CHECK9-NEXT: [[TMP109:%.*]] = bitcast i8** [[TMP108]] to i32**
7434 // CHECK9-NEXT: store i32* [[TMP89]], i32** [[TMP109]], align 8
7435 // CHECK9-NEXT: [[TMP110:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 3
7436 // CHECK9-NEXT: store i8* null, i8** [[TMP110]], align 8
7437 // CHECK9-NEXT: [[TMP111:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 4
7438 // CHECK9-NEXT: [[TMP112:%.*]] = bitcast i8** [[TMP111]] to i32**
7439 // CHECK9-NEXT: store i32* [[TMP90]], i32** [[TMP112]], align 8
7440 // CHECK9-NEXT: [[TMP113:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 4
7441 // CHECK9-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i32**
7442 // CHECK9-NEXT: store i32* [[TMP90]], i32** [[TMP114]], align 8
7443 // CHECK9-NEXT: [[TMP115:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 4
7444 // CHECK9-NEXT: store i8* null, i8** [[TMP115]], align 8
7445 // CHECK9-NEXT: [[TMP116:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0
7446 // CHECK9-NEXT: [[TMP117:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0
7447 // CHECK9-NEXT: [[TMP118:%.*]] = load i32, i32* [[N]], align 4
7448 // CHECK9-NEXT: store i32 [[TMP118]], i32* [[DOTCAPTURE_EXPR_25]], align 4
7449 // CHECK9-NEXT: [[TMP119:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4
7450 // CHECK9-NEXT: [[SUB27:%.*]] = sub nsw i32 [[TMP119]], 0
7451 // CHECK9-NEXT: [[DIV28:%.*]] = sdiv i32 [[SUB27]], 1
7452 // CHECK9-NEXT: [[SUB29:%.*]] = sub nsw i32 [[DIV28]], 1
7453 // CHECK9-NEXT: store i32 [[SUB29]], i32* [[DOTCAPTURE_EXPR_26]], align 4
7454 // CHECK9-NEXT: [[TMP120:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_26]], align 4
7455 // CHECK9-NEXT: [[ADD30:%.*]] = add nsw i32 [[TMP120]], 1
7456 // CHECK9-NEXT: [[TMP121:%.*]] = zext i32 [[ADD30]] to i64
7457 // CHECK9-NEXT: [[KERNEL_ARGS31:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
7458 // CHECK9-NEXT: [[TMP122:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS31]], i32 0, i32 0
7459 // CHECK9-NEXT: store i32 1, i32* [[TMP122]], align 4
7460 // CHECK9-NEXT: [[TMP123:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS31]], i32 0, i32 1
7461 // CHECK9-NEXT: store i32 5, i32* [[TMP123]], align 4
7462 // CHECK9-NEXT: [[TMP124:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS31]], i32 0, i32 2
7463 // CHECK9-NEXT: store i8** [[TMP116]], i8*** [[TMP124]], align 8
7464 // CHECK9-NEXT: [[TMP125:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS31]], i32 0, i32 3
7465 // CHECK9-NEXT: store i8** [[TMP117]], i8*** [[TMP125]], align 8
7466 // CHECK9-NEXT: [[TMP126:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS31]], i32 0, i32 4
7467 // CHECK9-NEXT: store i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.36, i32 0, i32 0), i64** [[TMP126]], align 8
7468 // CHECK9-NEXT: [[TMP127:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS31]], i32 0, i32 5
7469 // CHECK9-NEXT: store i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.37, i32 0, i32 0), i64** [[TMP127]], align 8
7470 // CHECK9-NEXT: [[TMP128:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS31]], i32 0, i32 6
7471 // CHECK9-NEXT: store i8** null, i8*** [[TMP128]], align 8
7472 // CHECK9-NEXT: [[TMP129:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS31]], i32 0, i32 7
7473 // CHECK9-NEXT: store i8** null, i8*** [[TMP129]], align 8
7474 // CHECK9-NEXT: [[TMP130:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS31]], i32 0, i32 8
7475 // CHECK9-NEXT: store i64 [[TMP121]], i64* [[TMP130]], align 8
7476 // CHECK9-NEXT: [[TMP131:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB3]], i64 -1, i32 0, i32 1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l58.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS31]])
7477 // CHECK9-NEXT: [[TMP132:%.*]] = icmp ne i32 [[TMP131]], 0
7478 // CHECK9-NEXT: br i1 [[TMP132]], label [[OMP_OFFLOAD_FAILED32:%.*]], label [[OMP_OFFLOAD_CONT33:%.*]]
7479 // CHECK9: omp_offload.failed32:
7480 // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l58(i64 [[TMP85]], i64 [[TMP87]], i32* [[TMP88]], i32* [[TMP89]], i32* [[TMP90]]) #[[ATTR2]]
7481 // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT33]]
7482 // CHECK9: omp_offload.cont33:
7483 // CHECK9-NEXT: [[TMP133:%.*]] = load i32, i32* [[N]], align 4
7484 // CHECK9-NEXT: [[CONV35:%.*]] = bitcast i64* [[N_CASTED34]] to i32*
7485 // CHECK9-NEXT: store i32 [[TMP133]], i32* [[CONV35]], align 4
7486 // CHECK9-NEXT: [[TMP134:%.*]] = load i64, i64* [[N_CASTED34]], align 8
7487 // CHECK9-NEXT: [[TMP135:%.*]] = load i32*, i32** [[A]], align 8
7488 // CHECK9-NEXT: [[TMP136:%.*]] = load i32*, i32** [[B]], align 8
7489 // CHECK9-NEXT: [[TMP137:%.*]] = load i32*, i32** [[C]], align 8
7490 // CHECK9-NEXT: [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 0
7491 // CHECK9-NEXT: [[TMP139:%.*]] = bitcast i8** [[TMP138]] to i64*
7492 // CHECK9-NEXT: store i64 [[TMP134]], i64* [[TMP139]], align 8
7493 // CHECK9-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS37]], i32 0, i32 0
7494 // CHECK9-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to i64*
7495 // CHECK9-NEXT: store i64 [[TMP134]], i64* [[TMP141]], align 8
7496 // CHECK9-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS38]], i64 0, i64 0
7497 // CHECK9-NEXT: store i8* null, i8** [[TMP142]], align 8
7498 // CHECK9-NEXT: [[TMP143:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 1
7499 // CHECK9-NEXT: [[TMP144:%.*]] = bitcast i8** [[TMP143]] to i32**
7500 // CHECK9-NEXT: store i32* [[TMP135]], i32** [[TMP144]], align 8
7501 // CHECK9-NEXT: [[TMP145:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS37]], i32 0, i32 1
7502 // CHECK9-NEXT: [[TMP146:%.*]] = bitcast i8** [[TMP145]] to i32**
7503 // CHECK9-NEXT: store i32* [[TMP135]], i32** [[TMP146]], align 8
7504 // CHECK9-NEXT: [[TMP147:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS38]], i64 0, i64 1
7505 // CHECK9-NEXT: store i8* null, i8** [[TMP147]], align 8
7506 // CHECK9-NEXT: [[TMP148:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 2
7507 // CHECK9-NEXT: [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i32**
7508 // CHECK9-NEXT: store i32* [[TMP136]], i32** [[TMP149]], align 8
7509 // CHECK9-NEXT: [[TMP150:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS37]], i32 0, i32 2
7510 // CHECK9-NEXT: [[TMP151:%.*]] = bitcast i8** [[TMP150]] to i32**
7511 // CHECK9-NEXT: store i32* [[TMP136]], i32** [[TMP151]], align 8
7512 // CHECK9-NEXT: [[TMP152:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS38]], i64 0, i64 2
7513 // CHECK9-NEXT: store i8* null, i8** [[TMP152]], align 8
7514 // CHECK9-NEXT: [[TMP153:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 3
7515 // CHECK9-NEXT: [[TMP154:%.*]] = bitcast i8** [[TMP153]] to i32**
7516 // CHECK9-NEXT: store i32* [[TMP137]], i32** [[TMP154]], align 8
7517 // CHECK9-NEXT: [[TMP155:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS37]], i32 0, i32 3
7518 // CHECK9-NEXT: [[TMP156:%.*]] = bitcast i8** [[TMP155]] to i32**
7519 // CHECK9-NEXT: store i32* [[TMP137]], i32** [[TMP156]], align 8
7520 // CHECK9-NEXT: [[TMP157:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS38]], i64 0, i64 3
7521 // CHECK9-NEXT: store i8* null, i8** [[TMP157]], align 8
7522 // CHECK9-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 0
7523 // CHECK9-NEXT: [[TMP159:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS37]], i32 0, i32 0
7524 // CHECK9-NEXT: [[TMP160:%.*]] = load i32, i32* [[N]], align 4
7525 // CHECK9-NEXT: store i32 [[TMP160]], i32* [[DOTCAPTURE_EXPR_40]], align 4
7526 // CHECK9-NEXT: [[TMP161:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_40]], align 4
7527 // CHECK9-NEXT: [[SUB42:%.*]] = sub nsw i32 [[TMP161]], 0
7528 // CHECK9-NEXT: [[DIV43:%.*]] = sdiv i32 [[SUB42]], 1
7529 // CHECK9-NEXT: [[SUB44:%.*]] = sub nsw i32 [[DIV43]], 1
7530 // CHECK9-NEXT: store i32 [[SUB44]], i32* [[DOTCAPTURE_EXPR_41]], align 4
7531 // CHECK9-NEXT: [[TMP162:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_41]], align 4
7532 // CHECK9-NEXT: [[ADD45:%.*]] = add nsw i32 [[TMP162]], 1
7533 // CHECK9-NEXT: [[TMP163:%.*]] = zext i32 [[ADD45]] to i64
7534 // CHECK9-NEXT: [[KERNEL_ARGS46:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
7535 // CHECK9-NEXT: [[TMP164:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS46]], i32 0, i32 0
7536 // CHECK9-NEXT: store i32 1, i32* [[TMP164]], align 4
7537 // CHECK9-NEXT: [[TMP165:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS46]], i32 0, i32 1
7538 // CHECK9-NEXT: store i32 4, i32* [[TMP165]], align 4
7539 // CHECK9-NEXT: [[TMP166:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS46]], i32 0, i32 2
7540 // CHECK9-NEXT: store i8** [[TMP158]], i8*** [[TMP166]], align 8
7541 // CHECK9-NEXT: [[TMP167:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS46]], i32 0, i32 3
7542 // CHECK9-NEXT: store i8** [[TMP159]], i8*** [[TMP167]], align 8
7543 // CHECK9-NEXT: [[TMP168:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS46]], i32 0, i32 4
7544 // CHECK9-NEXT: store i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.40, i32 0, i32 0), i64** [[TMP168]], align 8
7545 // CHECK9-NEXT: [[TMP169:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS46]], i32 0, i32 5
7546 // CHECK9-NEXT: store i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.41, i32 0, i32 0), i64** [[TMP169]], align 8
7547 // CHECK9-NEXT: [[TMP170:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS46]], i32 0, i32 6
7548 // CHECK9-NEXT: store i8** null, i8*** [[TMP170]], align 8
7549 // CHECK9-NEXT: [[TMP171:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS46]], i32 0, i32 7
7550 // CHECK9-NEXT: store i8** null, i8*** [[TMP171]], align 8
7551 // CHECK9-NEXT: [[TMP172:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS46]], i32 0, i32 8
7552 // CHECK9-NEXT: store i64 [[TMP163]], i64* [[TMP172]], align 8
7553 // CHECK9-NEXT: [[TMP173:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB3]], i64 -1, i32 0, i32 1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l66.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS46]])
7554 // CHECK9-NEXT: [[TMP174:%.*]] = icmp ne i32 [[TMP173]], 0
7555 // CHECK9-NEXT: br i1 [[TMP174]], label [[OMP_OFFLOAD_FAILED47:%.*]], label [[OMP_OFFLOAD_CONT48:%.*]]
7556 // CHECK9: omp_offload.failed47:
7557 // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l66(i64 [[TMP134]], i32* [[TMP135]], i32* [[TMP136]], i32* [[TMP137]]) #[[ATTR2]]
7558 // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT48]]
7559 // CHECK9: omp_offload.cont48:
7560 // CHECK9-NEXT: [[TMP175:%.*]] = load i32, i32* [[CH]], align 4
7561 // CHECK9-NEXT: [[CONV50:%.*]] = bitcast i64* [[CH_CASTED49]] to i32*
7562 // CHECK9-NEXT: store i32 [[TMP175]], i32* [[CONV50]], align 4
7563 // CHECK9-NEXT: [[TMP176:%.*]] = load i64, i64* [[CH_CASTED49]], align 8
7564 // CHECK9-NEXT: [[TMP177:%.*]] = load i32, i32* [[N]], align 4
7565 // CHECK9-NEXT: [[CONV52:%.*]] = bitcast i64* [[N_CASTED51]] to i32*
7566 // CHECK9-NEXT: store i32 [[TMP177]], i32* [[CONV52]], align 4
7567 // CHECK9-NEXT: [[TMP178:%.*]] = load i64, i64* [[N_CASTED51]], align 8
7568 // CHECK9-NEXT: [[TMP179:%.*]] = load i32*, i32** [[A]], align 8
7569 // CHECK9-NEXT: [[TMP180:%.*]] = load i32*, i32** [[B]], align 8
7570 // CHECK9-NEXT: [[TMP181:%.*]] = load i32*, i32** [[C]], align 8
7571 // CHECK9-NEXT: [[TMP182:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 0
7572 // CHECK9-NEXT: [[TMP183:%.*]] = bitcast i8** [[TMP182]] to i64*
7573 // CHECK9-NEXT: store i64 [[TMP176]], i64* [[TMP183]], align 8
7574 // CHECK9-NEXT: [[TMP184:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 0
7575 // CHECK9-NEXT: [[TMP185:%.*]] = bitcast i8** [[TMP184]] to i64*
7576 // CHECK9-NEXT: store i64 [[TMP176]], i64* [[TMP185]], align 8
7577 // CHECK9-NEXT: [[TMP186:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS55]], i64 0, i64 0
7578 // CHECK9-NEXT: store i8* null, i8** [[TMP186]], align 8
7579 // CHECK9-NEXT: [[TMP187:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 1
7580 // CHECK9-NEXT: [[TMP188:%.*]] = bitcast i8** [[TMP187]] to i64*
7581 // CHECK9-NEXT: store i64 [[TMP178]], i64* [[TMP188]], align 8
7582 // CHECK9-NEXT: [[TMP189:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 1
7583 // CHECK9-NEXT: [[TMP190:%.*]] = bitcast i8** [[TMP189]] to i64*
7584 // CHECK9-NEXT: store i64 [[TMP178]], i64* [[TMP190]], align 8
7585 // CHECK9-NEXT: [[TMP191:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS55]], i64 0, i64 1
7586 // CHECK9-NEXT: store i8* null, i8** [[TMP191]], align 8
7587 // CHECK9-NEXT: [[TMP192:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 2
7588 // CHECK9-NEXT: [[TMP193:%.*]] = bitcast i8** [[TMP192]] to i32**
7589 // CHECK9-NEXT: store i32* [[TMP179]], i32** [[TMP193]], align 8
7590 // CHECK9-NEXT: [[TMP194:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 2
7591 // CHECK9-NEXT: [[TMP195:%.*]] = bitcast i8** [[TMP194]] to i32**
7592 // CHECK9-NEXT: store i32* [[TMP179]], i32** [[TMP195]], align 8
7593 // CHECK9-NEXT: [[TMP196:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS55]], i64 0, i64 2
7594 // CHECK9-NEXT: store i8* null, i8** [[TMP196]], align 8
7595 // CHECK9-NEXT: [[TMP197:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 3
7596 // CHECK9-NEXT: [[TMP198:%.*]] = bitcast i8** [[TMP197]] to i32**
7597 // CHECK9-NEXT: store i32* [[TMP180]], i32** [[TMP198]], align 8
7598 // CHECK9-NEXT: [[TMP199:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 3
7599 // CHECK9-NEXT: [[TMP200:%.*]] = bitcast i8** [[TMP199]] to i32**
7600 // CHECK9-NEXT: store i32* [[TMP180]], i32** [[TMP200]], align 8
7601 // CHECK9-NEXT: [[TMP201:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS55]], i64 0, i64 3
7602 // CHECK9-NEXT: store i8* null, i8** [[TMP201]], align 8
7603 // CHECK9-NEXT: [[TMP202:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 4
7604 // CHECK9-NEXT: [[TMP203:%.*]] = bitcast i8** [[TMP202]] to i32**
7605 // CHECK9-NEXT: store i32* [[TMP181]], i32** [[TMP203]], align 8
7606 // CHECK9-NEXT: [[TMP204:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 4
7607 // CHECK9-NEXT: [[TMP205:%.*]] = bitcast i8** [[TMP204]] to i32**
7608 // CHECK9-NEXT: store i32* [[TMP181]], i32** [[TMP205]], align 8
7609 // CHECK9-NEXT: [[TMP206:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS55]], i64 0, i64 4
7610 // CHECK9-NEXT: store i8* null, i8** [[TMP206]], align 8
7611 // CHECK9-NEXT: [[TMP207:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 0
7612 // CHECK9-NEXT: [[TMP208:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 0
7613 // CHECK9-NEXT: [[TMP209:%.*]] = load i32, i32* [[N]], align 4
7614 // CHECK9-NEXT: store i32 [[TMP209]], i32* [[DOTCAPTURE_EXPR_57]], align 4
7615 // CHECK9-NEXT: [[TMP210:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_57]], align 4
7616 // CHECK9-NEXT: [[SUB59:%.*]] = sub nsw i32 [[TMP210]], 0
7617 // CHECK9-NEXT: [[DIV60:%.*]] = sdiv i32 [[SUB59]], 1
7618 // CHECK9-NEXT: [[SUB61:%.*]] = sub nsw i32 [[DIV60]], 1
7619 // CHECK9-NEXT: store i32 [[SUB61]], i32* [[DOTCAPTURE_EXPR_58]], align 4
7620 // CHECK9-NEXT: [[TMP211:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_58]], align 4
7621 // CHECK9-NEXT: [[ADD62:%.*]] = add nsw i32 [[TMP211]], 1
7622 // CHECK9-NEXT: [[TMP212:%.*]] = zext i32 [[ADD62]] to i64
7623 // CHECK9-NEXT: [[KERNEL_ARGS63:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
7624 // CHECK9-NEXT: [[TMP213:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS63]], i32 0, i32 0
7625 // CHECK9-NEXT: store i32 1, i32* [[TMP213]], align 4
7626 // CHECK9-NEXT: [[TMP214:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS63]], i32 0, i32 1
7627 // CHECK9-NEXT: store i32 5, i32* [[TMP214]], align 4
7628 // CHECK9-NEXT: [[TMP215:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS63]], i32 0, i32 2
7629 // CHECK9-NEXT: store i8** [[TMP207]], i8*** [[TMP215]], align 8
7630 // CHECK9-NEXT: [[TMP216:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS63]], i32 0, i32 3
7631 // CHECK9-NEXT: store i8** [[TMP208]], i8*** [[TMP216]], align 8
7632 // CHECK9-NEXT: [[TMP217:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS63]], i32 0, i32 4
7633 // CHECK9-NEXT: store i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.44, i32 0, i32 0), i64** [[TMP217]], align 8
7634 // CHECK9-NEXT: [[TMP218:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS63]], i32 0, i32 5
7635 // CHECK9-NEXT: store i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.45, i32 0, i32 0), i64** [[TMP218]], align 8
7636 // CHECK9-NEXT: [[TMP219:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS63]], i32 0, i32 6
7637 // CHECK9-NEXT: store i8** null, i8*** [[TMP219]], align 8
7638 // CHECK9-NEXT: [[TMP220:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS63]], i32 0, i32 7
7639 // CHECK9-NEXT: store i8** null, i8*** [[TMP220]], align 8
7640 // CHECK9-NEXT: [[TMP221:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS63]], i32 0, i32 8
7641 // CHECK9-NEXT: store i64 [[TMP212]], i64* [[TMP221]], align 8
7642 // CHECK9-NEXT: [[TMP222:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB3]], i64 -1, i32 0, i32 1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l74.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS63]])
7643 // CHECK9-NEXT: [[TMP223:%.*]] = icmp ne i32 [[TMP222]], 0
7644 // CHECK9-NEXT: br i1 [[TMP223]], label [[OMP_OFFLOAD_FAILED64:%.*]], label [[OMP_OFFLOAD_CONT65:%.*]]
7645 // CHECK9: omp_offload.failed64:
7646 // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l74(i64 [[TMP176]], i64 [[TMP178]], i32* [[TMP179]], i32* [[TMP180]], i32* [[TMP181]]) #[[ATTR2]]
7647 // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT65]]
7648 // CHECK9: omp_offload.cont65:
7649 // CHECK9-NEXT: [[TMP224:%.*]] = load i32, i32* [[N]], align 4
7650 // CHECK9-NEXT: [[CONV67:%.*]] = bitcast i64* [[N_CASTED66]] to i32*
7651 // CHECK9-NEXT: store i32 [[TMP224]], i32* [[CONV67]], align 4
7652 // CHECK9-NEXT: [[TMP225:%.*]] = load i64, i64* [[N_CASTED66]], align 8
7653 // CHECK9-NEXT: [[TMP226:%.*]] = load i32*, i32** [[A]], align 8
7654 // CHECK9-NEXT: [[TMP227:%.*]] = load i32*, i32** [[B]], align 8
7655 // CHECK9-NEXT: [[TMP228:%.*]] = load i32*, i32** [[C]], align 8
7656 // CHECK9-NEXT: [[TMP229:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS68]], i32 0, i32 0
7657 // CHECK9-NEXT: [[TMP230:%.*]] = bitcast i8** [[TMP229]] to i64*
7658 // CHECK9-NEXT: store i64 [[TMP225]], i64* [[TMP230]], align 8
7659 // CHECK9-NEXT: [[TMP231:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS69]], i32 0, i32 0
7660 // CHECK9-NEXT: [[TMP232:%.*]] = bitcast i8** [[TMP231]] to i64*
7661 // CHECK9-NEXT: store i64 [[TMP225]], i64* [[TMP232]], align 8
7662 // CHECK9-NEXT: [[TMP233:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS70]], i64 0, i64 0
7663 // CHECK9-NEXT: store i8* null, i8** [[TMP233]], align 8
7664 // CHECK9-NEXT: [[TMP234:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS68]], i32 0, i32 1
7665 // CHECK9-NEXT: [[TMP235:%.*]] = bitcast i8** [[TMP234]] to i32**
7666 // CHECK9-NEXT: store i32* [[TMP226]], i32** [[TMP235]], align 8
7667 // CHECK9-NEXT: [[TMP236:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS69]], i32 0, i32 1
7668 // CHECK9-NEXT: [[TMP237:%.*]] = bitcast i8** [[TMP236]] to i32**
7669 // CHECK9-NEXT: store i32* [[TMP226]], i32** [[TMP237]], align 8
7670 // CHECK9-NEXT: [[TMP238:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS70]], i64 0, i64 1
7671 // CHECK9-NEXT: store i8* null, i8** [[TMP238]], align 8
7672 // CHECK9-NEXT: [[TMP239:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS68]], i32 0, i32 2
7673 // CHECK9-NEXT: [[TMP240:%.*]] = bitcast i8** [[TMP239]] to i32**
7674 // CHECK9-NEXT: store i32* [[TMP227]], i32** [[TMP240]], align 8
7675 // CHECK9-NEXT: [[TMP241:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS69]], i32 0, i32 2
7676 // CHECK9-NEXT: [[TMP242:%.*]] = bitcast i8** [[TMP241]] to i32**
7677 // CHECK9-NEXT: store i32* [[TMP227]], i32** [[TMP242]], align 8
7678 // CHECK9-NEXT: [[TMP243:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS70]], i64 0, i64 2
7679 // CHECK9-NEXT: store i8* null, i8** [[TMP243]], align 8
7680 // CHECK9-NEXT: [[TMP244:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS68]], i32 0, i32 3
7681 // CHECK9-NEXT: [[TMP245:%.*]] = bitcast i8** [[TMP244]] to i32**
7682 // CHECK9-NEXT: store i32* [[TMP228]], i32** [[TMP245]], align 8
7683 // CHECK9-NEXT: [[TMP246:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS69]], i32 0, i32 3
7684 // CHECK9-NEXT: [[TMP247:%.*]] = bitcast i8** [[TMP246]] to i32**
7685 // CHECK9-NEXT: store i32* [[TMP228]], i32** [[TMP247]], align 8
7686 // CHECK9-NEXT: [[TMP248:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS70]], i64 0, i64 3
7687 // CHECK9-NEXT: store i8* null, i8** [[TMP248]], align 8
7688 // CHECK9-NEXT: [[TMP249:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS68]], i32 0, i32 0
7689 // CHECK9-NEXT: [[TMP250:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS69]], i32 0, i32 0
7690 // CHECK9-NEXT: [[TMP251:%.*]] = load i32, i32* [[N]], align 4
7691 // CHECK9-NEXT: store i32 [[TMP251]], i32* [[DOTCAPTURE_EXPR_72]], align 4
7692 // CHECK9-NEXT: [[TMP252:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_72]], align 4
7693 // CHECK9-NEXT: [[SUB74:%.*]] = sub nsw i32 [[TMP252]], 0
7694 // CHECK9-NEXT: [[DIV75:%.*]] = sdiv i32 [[SUB74]], 1
7695 // CHECK9-NEXT: [[SUB76:%.*]] = sub nsw i32 [[DIV75]], 1
7696 // CHECK9-NEXT: store i32 [[SUB76]], i32* [[DOTCAPTURE_EXPR_73]], align 4
7697 // CHECK9-NEXT: [[TMP253:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_73]], align 4
7698 // CHECK9-NEXT: [[ADD77:%.*]] = add nsw i32 [[TMP253]], 1
7699 // CHECK9-NEXT: [[TMP254:%.*]] = zext i32 [[ADD77]] to i64
7700 // CHECK9-NEXT: [[KERNEL_ARGS78:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
7701 // CHECK9-NEXT: [[TMP255:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS78]], i32 0, i32 0
7702 // CHECK9-NEXT: store i32 1, i32* [[TMP255]], align 4
7703 // CHECK9-NEXT: [[TMP256:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS78]], i32 0, i32 1
7704 // CHECK9-NEXT: store i32 4, i32* [[TMP256]], align 4
7705 // CHECK9-NEXT: [[TMP257:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS78]], i32 0, i32 2
7706 // CHECK9-NEXT: store i8** [[TMP249]], i8*** [[TMP257]], align 8
7707 // CHECK9-NEXT: [[TMP258:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS78]], i32 0, i32 3
7708 // CHECK9-NEXT: store i8** [[TMP250]], i8*** [[TMP258]], align 8
7709 // CHECK9-NEXT: [[TMP259:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS78]], i32 0, i32 4
7710 // CHECK9-NEXT: store i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.48, i32 0, i32 0), i64** [[TMP259]], align 8
7711 // CHECK9-NEXT: [[TMP260:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS78]], i32 0, i32 5
7712 // CHECK9-NEXT: store i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.49, i32 0, i32 0), i64** [[TMP260]], align 8
7713 // CHECK9-NEXT: [[TMP261:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS78]], i32 0, i32 6
7714 // CHECK9-NEXT: store i8** null, i8*** [[TMP261]], align 8
7715 // CHECK9-NEXT: [[TMP262:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS78]], i32 0, i32 7
7716 // CHECK9-NEXT: store i8** null, i8*** [[TMP262]], align 8
7717 // CHECK9-NEXT: [[TMP263:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS78]], i32 0, i32 8
7718 // CHECK9-NEXT: store i64 [[TMP254]], i64* [[TMP263]], align 8
7719 // CHECK9-NEXT: [[TMP264:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB3]], i64 -1, i32 0, i32 1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l82.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS78]])
7720 // CHECK9-NEXT: [[TMP265:%.*]] = icmp ne i32 [[TMP264]], 0
7721 // CHECK9-NEXT: br i1 [[TMP265]], label [[OMP_OFFLOAD_FAILED79:%.*]], label [[OMP_OFFLOAD_CONT80:%.*]]
7722 // CHECK9: omp_offload.failed79:
7723 // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l82(i64 [[TMP225]], i32* [[TMP226]], i32* [[TMP227]], i32* [[TMP228]]) #[[ATTR2]]
7724 // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT80]]
7725 // CHECK9: omp_offload.cont80:
7726 // CHECK9-NEXT: [[TMP266:%.*]] = load i32, i32* [[CH]], align 4
7727 // CHECK9-NEXT: [[CONV82:%.*]] = bitcast i64* [[CH_CASTED81]] to i32*
7728 // CHECK9-NEXT: store i32 [[TMP266]], i32* [[CONV82]], align 4
7729 // CHECK9-NEXT: [[TMP267:%.*]] = load i64, i64* [[CH_CASTED81]], align 8
7730 // CHECK9-NEXT: [[TMP268:%.*]] = load i32, i32* [[N]], align 4
7731 // CHECK9-NEXT: [[CONV84:%.*]] = bitcast i64* [[N_CASTED83]] to i32*
7732 // CHECK9-NEXT: store i32 [[TMP268]], i32* [[CONV84]], align 4
7733 // CHECK9-NEXT: [[TMP269:%.*]] = load i64, i64* [[N_CASTED83]], align 8
7734 // CHECK9-NEXT: [[TMP270:%.*]] = load i32*, i32** [[A]], align 8
7735 // CHECK9-NEXT: [[TMP271:%.*]] = load i32*, i32** [[B]], align 8
7736 // CHECK9-NEXT: [[TMP272:%.*]] = load i32*, i32** [[C]], align 8
7737 // CHECK9-NEXT: [[TMP273:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS85]], i32 0, i32 0
7738 // CHECK9-NEXT: [[TMP274:%.*]] = bitcast i8** [[TMP273]] to i64*
7739 // CHECK9-NEXT: store i64 [[TMP267]], i64* [[TMP274]], align 8
7740 // CHECK9-NEXT: [[TMP275:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS86]], i32 0, i32 0
7741 // CHECK9-NEXT: [[TMP276:%.*]] = bitcast i8** [[TMP275]] to i64*
7742 // CHECK9-NEXT: store i64 [[TMP267]], i64* [[TMP276]], align 8
7743 // CHECK9-NEXT: [[TMP277:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS87]], i64 0, i64 0
7744 // CHECK9-NEXT: store i8* null, i8** [[TMP277]], align 8
7745 // CHECK9-NEXT: [[TMP278:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS85]], i32 0, i32 1
7746 // CHECK9-NEXT: [[TMP279:%.*]] = bitcast i8** [[TMP278]] to i64*
7747 // CHECK9-NEXT: store i64 [[TMP269]], i64* [[TMP279]], align 8
7748 // CHECK9-NEXT: [[TMP280:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS86]], i32 0, i32 1
7749 // CHECK9-NEXT: [[TMP281:%.*]] = bitcast i8** [[TMP280]] to i64*
7750 // CHECK9-NEXT: store i64 [[TMP269]], i64* [[TMP281]], align 8
7751 // CHECK9-NEXT: [[TMP282:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS87]], i64 0, i64 1
7752 // CHECK9-NEXT: store i8* null, i8** [[TMP282]], align 8
7753 // CHECK9-NEXT: [[TMP283:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS85]], i32 0, i32 2
7754 // CHECK9-NEXT: [[TMP284:%.*]] = bitcast i8** [[TMP283]] to i32**
7755 // CHECK9-NEXT: store i32* [[TMP270]], i32** [[TMP284]], align 8
7756 // CHECK9-NEXT: [[TMP285:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS86]], i32 0, i32 2
7757 // CHECK9-NEXT: [[TMP286:%.*]] = bitcast i8** [[TMP285]] to i32**
7758 // CHECK9-NEXT: store i32* [[TMP270]], i32** [[TMP286]], align 8
7759 // CHECK9-NEXT: [[TMP287:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS87]], i64 0, i64 2
7760 // CHECK9-NEXT: store i8* null, i8** [[TMP287]], align 8
7761 // CHECK9-NEXT: [[TMP288:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS85]], i32 0, i32 3
7762 // CHECK9-NEXT: [[TMP289:%.*]] = bitcast i8** [[TMP288]] to i32**
7763 // CHECK9-NEXT: store i32* [[TMP271]], i32** [[TMP289]], align 8
7764 // CHECK9-NEXT: [[TMP290:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS86]], i32 0, i32 3
7765 // CHECK9-NEXT: [[TMP291:%.*]] = bitcast i8** [[TMP290]] to i32**
7766 // CHECK9-NEXT: store i32* [[TMP271]], i32** [[TMP291]], align 8
7767 // CHECK9-NEXT: [[TMP292:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS87]], i64 0, i64 3
7768 // CHECK9-NEXT: store i8* null, i8** [[TMP292]], align 8
7769 // CHECK9-NEXT: [[TMP293:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS85]], i32 0, i32 4
7770 // CHECK9-NEXT: [[TMP294:%.*]] = bitcast i8** [[TMP293]] to i32**
7771 // CHECK9-NEXT: store i32* [[TMP272]], i32** [[TMP294]], align 8
7772 // CHECK9-NEXT: [[TMP295:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS86]], i32 0, i32 4
7773 // CHECK9-NEXT: [[TMP296:%.*]] = bitcast i8** [[TMP295]] to i32**
7774 // CHECK9-NEXT: store i32* [[TMP272]], i32** [[TMP296]], align 8
7775 // CHECK9-NEXT: [[TMP297:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS87]], i64 0, i64 4
7776 // CHECK9-NEXT: store i8* null, i8** [[TMP297]], align 8
7777 // CHECK9-NEXT: [[TMP298:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS85]], i32 0, i32 0
7778 // CHECK9-NEXT: [[TMP299:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS86]], i32 0, i32 0
7779 // CHECK9-NEXT: [[TMP300:%.*]] = load i32, i32* [[N]], align 4
7780 // CHECK9-NEXT: store i32 [[TMP300]], i32* [[DOTCAPTURE_EXPR_89]], align 4
7781 // CHECK9-NEXT: [[TMP301:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_89]], align 4
7782 // CHECK9-NEXT: [[SUB91:%.*]] = sub nsw i32 [[TMP301]], 0
7783 // CHECK9-NEXT: [[DIV92:%.*]] = sdiv i32 [[SUB91]], 1
7784 // CHECK9-NEXT: [[SUB93:%.*]] = sub nsw i32 [[DIV92]], 1
7785 // CHECK9-NEXT: store i32 [[SUB93]], i32* [[DOTCAPTURE_EXPR_90]], align 4
7786 // CHECK9-NEXT: [[TMP302:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_90]], align 4
7787 // CHECK9-NEXT: [[ADD94:%.*]] = add nsw i32 [[TMP302]], 1
7788 // CHECK9-NEXT: [[TMP303:%.*]] = zext i32 [[ADD94]] to i64
7789 // CHECK9-NEXT: [[KERNEL_ARGS95:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
7790 // CHECK9-NEXT: [[TMP304:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS95]], i32 0, i32 0
7791 // CHECK9-NEXT: store i32 1, i32* [[TMP304]], align 4
7792 // CHECK9-NEXT: [[TMP305:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS95]], i32 0, i32 1
7793 // CHECK9-NEXT: store i32 5, i32* [[TMP305]], align 4
7794 // CHECK9-NEXT: [[TMP306:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS95]], i32 0, i32 2
7795 // CHECK9-NEXT: store i8** [[TMP298]], i8*** [[TMP306]], align 8
7796 // CHECK9-NEXT: [[TMP307:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS95]], i32 0, i32 3
7797 // CHECK9-NEXT: store i8** [[TMP299]], i8*** [[TMP307]], align 8
7798 // CHECK9-NEXT: [[TMP308:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS95]], i32 0, i32 4
7799 // CHECK9-NEXT: store i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.52, i32 0, i32 0), i64** [[TMP308]], align 8
7800 // CHECK9-NEXT: [[TMP309:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS95]], i32 0, i32 5
7801 // CHECK9-NEXT: store i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.53, i32 0, i32 0), i64** [[TMP309]], align 8
7802 // CHECK9-NEXT: [[TMP310:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS95]], i32 0, i32 6
7803 // CHECK9-NEXT: store i8** null, i8*** [[TMP310]], align 8
7804 // CHECK9-NEXT: [[TMP311:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS95]], i32 0, i32 7
7805 // CHECK9-NEXT: store i8** null, i8*** [[TMP311]], align 8
7806 // CHECK9-NEXT: [[TMP312:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS95]], i32 0, i32 8
7807 // CHECK9-NEXT: store i64 [[TMP303]], i64* [[TMP312]], align 8
7808 // CHECK9-NEXT: [[TMP313:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB3]], i64 -1, i32 0, i32 1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l90.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS95]])
7809 // CHECK9-NEXT: [[TMP314:%.*]] = icmp ne i32 [[TMP313]], 0
7810 // CHECK9-NEXT: br i1 [[TMP314]], label [[OMP_OFFLOAD_FAILED96:%.*]], label [[OMP_OFFLOAD_CONT97:%.*]]
7811 // CHECK9: omp_offload.failed96:
7812 // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l90(i64 [[TMP267]], i64 [[TMP269]], i32* [[TMP270]], i32* [[TMP271]], i32* [[TMP272]]) #[[ATTR2]]
7813 // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT97]]
7814 // CHECK9: omp_offload.cont97:
7815 // CHECK9-NEXT: ret i32 0
7816 //
7817 //
7818 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l42
7819 // CHECK9-SAME: (i64 noundef [[N:%.*]], i32* noundef [[A:%.*]], i32* noundef [[B:%.*]], i32* noundef [[C:%.*]]) #[[ATTR1]] {
7820 // CHECK9-NEXT: entry:
7821 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
7822 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
7823 // CHECK9-NEXT: [[B_ADDR:%.*]] = alloca i32*, align 8
7824 // CHECK9-NEXT: [[C_ADDR:%.*]] = alloca i32*, align 8
7825 // CHECK9-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
7826 // CHECK9-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
7827 // CHECK9-NEXT: store i32* [[B]], i32** [[B_ADDR]], align 8
7828 // CHECK9-NEXT: store i32* [[C]], i32** [[C_ADDR]], align 8
7829 // CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
7830 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i32* [[CONV]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
7831 // CHECK9-NEXT: ret void
7832 //
7833 //
7834 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..26
7835 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
7836 // CHECK9-NEXT: entry:
7837 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
7838 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
7839 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
7840 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i32**, align 8
7841 // CHECK9-NEXT: [[B_ADDR:%.*]] = alloca i32**, align 8
7842 // CHECK9-NEXT: [[C_ADDR:%.*]] = alloca i32**, align 8
7843 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
7844 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
7845 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
7846 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
7847 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
7848 // CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
7849 // CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
7850 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7851 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7852 // CHECK9-NEXT: [[I3:%.*]] = alloca i32, align 4
7853 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
7854 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
7855 // CHECK9-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
7856 // CHECK9-NEXT: store i32** [[A]], i32*** [[A_ADDR]], align 8
7857 // CHECK9-NEXT: store i32** [[B]], i32*** [[B_ADDR]], align 8
7858 // CHECK9-NEXT: store i32** [[C]], i32*** [[C_ADDR]], align 8
7859 // CHECK9-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
7860 // CHECK9-NEXT: [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
7861 // CHECK9-NEXT: [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
7862 // CHECK9-NEXT: [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
7863 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
7864 // CHECK9-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
7865 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7866 // CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
7867 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
7868 // CHECK9-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
7869 // CHECK9-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
7870 // CHECK9-NEXT: store i32 0, i32* [[I]], align 4
7871 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7872 // CHECK9-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
7873 // CHECK9-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
7874 // CHECK9: omp.precond.then:
7875 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
7876 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7877 // CHECK9-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
7878 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
7879 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
7880 // CHECK9-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
7881 // CHECK9-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
7882 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
7883 // CHECK9-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
7884 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7885 // CHECK9-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
7886 // CHECK9-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
7887 // CHECK9: cond.true:
7888 // CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7889 // CHECK9-NEXT: br label [[COND_END:%.*]]
7890 // CHECK9: cond.false:
7891 // CHECK9-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
7892 // CHECK9-NEXT: br label [[COND_END]]
7893 // CHECK9: cond.end:
7894 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
7895 // CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
7896 // CHECK9-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
7897 // CHECK9-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
7898 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
7899 // CHECK9: omp.inner.for.cond:
7900 // CHECK9-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !62
7901 // CHECK9-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !62
7902 // CHECK9-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
7903 // CHECK9-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7904 // CHECK9: omp.inner.for.body:
7905 // CHECK9-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !62
7906 // CHECK9-NEXT: [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
7907 // CHECK9-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !62
7908 // CHECK9-NEXT: [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
7909 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i32**, i32**, i32**)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], i32** [[TMP1]], i32** [[TMP2]], i32** [[TMP3]]), !llvm.access.group !62
7910 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
7911 // CHECK9: omp.inner.for.inc:
7912 // CHECK9-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !62
7913 // CHECK9-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !62
7914 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
7915 // CHECK9-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !62
7916 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP63:![0-9]+]]
7917 // CHECK9: omp.inner.for.end:
7918 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
7919 // CHECK9: omp.loop.exit:
7920 // CHECK9-NEXT: [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
7921 // CHECK9-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
7922 // CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
7923 // CHECK9-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
7924 // CHECK9-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
7925 // CHECK9-NEXT: br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7926 // CHECK9: .omp.final.then:
7927 // CHECK9-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7928 // CHECK9-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
7929 // CHECK9-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
7930 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
7931 // CHECK9-NEXT: [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
7932 // CHECK9-NEXT: store i32 [[ADD8]], i32* [[I3]], align 4
7933 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
7934 // CHECK9: .omp.final.done:
7935 // CHECK9-NEXT: br label [[OMP_PRECOND_END]]
7936 // CHECK9: omp.precond.end:
7937 // CHECK9-NEXT: ret void
7938 //
7939 //
7940 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..27
7941 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
7942 // CHECK9-NEXT: entry:
7943 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
7944 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
7945 // CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
7946 // CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
7947 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
7948 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i32**, align 8
7949 // CHECK9-NEXT: [[B_ADDR:%.*]] = alloca i32**, align 8
7950 // CHECK9-NEXT: [[C_ADDR:%.*]] = alloca i32**, align 8
7951 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
7952 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
7953 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
7954 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
7955 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
7956 // CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
7957 // CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
7958 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7959 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7960 // CHECK9-NEXT: [[I4:%.*]] = alloca i32, align 4
7961 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
7962 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
7963 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
7964 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
7965 // CHECK9-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
7966 // CHECK9-NEXT: store i32** [[A]], i32*** [[A_ADDR]], align 8
7967 // CHECK9-NEXT: store i32** [[B]], i32*** [[B_ADDR]], align 8
7968 // CHECK9-NEXT: store i32** [[C]], i32*** [[C_ADDR]], align 8
7969 // CHECK9-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
7970 // CHECK9-NEXT: [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
7971 // CHECK9-NEXT: [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
7972 // CHECK9-NEXT: [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
7973 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
7974 // CHECK9-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
7975 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7976 // CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
7977 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
7978 // CHECK9-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
7979 // CHECK9-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
7980 // CHECK9-NEXT: store i32 0, i32* [[I]], align 4
7981 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7982 // CHECK9-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
7983 // CHECK9-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
7984 // CHECK9: omp.precond.then:
7985 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
7986 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7987 // CHECK9-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
7988 // CHECK9-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
7989 // CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
7990 // CHECK9-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
7991 // CHECK9-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
7992 // CHECK9-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
7993 // CHECK9-NEXT: store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
7994 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
7995 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
7996 // CHECK9-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
7997 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
7998 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
7999 // CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
8000 // CHECK9-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8001 // CHECK9-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
8002 // CHECK9-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
8003 // CHECK9: cond.true:
8004 // CHECK9-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8005 // CHECK9-NEXT: br label [[COND_END:%.*]]
8006 // CHECK9: cond.false:
8007 // CHECK9-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
8008 // CHECK9-NEXT: br label [[COND_END]]
8009 // CHECK9: cond.end:
8010 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
8011 // CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
8012 // CHECK9-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
8013 // CHECK9-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
8014 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
8015 // CHECK9: omp.inner.for.cond:
8016 // CHECK9-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !65
8017 // CHECK9-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !65
8018 // CHECK9-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
8019 // CHECK9-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8020 // CHECK9: omp.inner.for.body:
8021 // CHECK9-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !65
8022 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
8023 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
8024 // CHECK9-NEXT: store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !65
8025 // CHECK9-NEXT: [[TMP20:%.*]] = load i32*, i32** [[TMP2]], align 8, !llvm.access.group !65
8026 // CHECK9-NEXT: [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !65
8027 // CHECK9-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
8028 // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i64 [[IDXPROM]]
8029 // CHECK9-NEXT: [[TMP22:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !65
8030 // CHECK9-NEXT: [[TMP23:%.*]] = load i32*, i32** [[TMP3]], align 8, !llvm.access.group !65
8031 // CHECK9-NEXT: [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !65
8032 // CHECK9-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
8033 // CHECK9-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i64 [[IDXPROM7]]
8034 // CHECK9-NEXT: [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX8]], align 4, !llvm.access.group !65
8035 // CHECK9-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP22]], [[TMP25]]
8036 // CHECK9-NEXT: [[TMP26:%.*]] = load i32*, i32** [[TMP1]], align 8, !llvm.access.group !65
8037 // CHECK9-NEXT: [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !65
8038 // CHECK9-NEXT: [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
8039 // CHECK9-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i64 [[IDXPROM10]]
8040 // CHECK9-NEXT: store i32 [[ADD9]], i32* [[ARRAYIDX11]], align 4, !llvm.access.group !65
8041 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
8042 // CHECK9: omp.body.continue:
8043 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
8044 // CHECK9: omp.inner.for.inc:
8045 // CHECK9-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !65
8046 // CHECK9-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP28]], 1
8047 // CHECK9-NEXT: store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !65
8048 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP66:![0-9]+]]
8049 // CHECK9: omp.inner.for.end:
8050 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
8051 // CHECK9: omp.loop.exit:
8052 // CHECK9-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8053 // CHECK9-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
8054 // CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
8055 // CHECK9-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
8056 // CHECK9-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
8057 // CHECK9-NEXT: br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
8058 // CHECK9: .omp.final.then:
8059 // CHECK9-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8060 // CHECK9-NEXT: [[SUB13:%.*]] = sub nsw i32 [[TMP33]], 0
8061 // CHECK9-NEXT: [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
8062 // CHECK9-NEXT: [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
8063 // CHECK9-NEXT: [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
8064 // CHECK9-NEXT: store i32 [[ADD16]], i32* [[I4]], align 4
8065 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
8066 // CHECK9: .omp.final.done:
8067 // CHECK9-NEXT: br label [[OMP_PRECOND_END]]
8068 // CHECK9: omp.precond.end:
8069 // CHECK9-NEXT: ret void
8070 //
8071 //
8072 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l50
8073 // CHECK9-SAME: (i64 noundef [[N:%.*]], i32* noundef [[A:%.*]], i32* noundef [[B:%.*]], i32* noundef [[C:%.*]]) #[[ATTR1]] {
8074 // CHECK9-NEXT: entry:
8075 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
8076 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
8077 // CHECK9-NEXT: [[B_ADDR:%.*]] = alloca i32*, align 8
8078 // CHECK9-NEXT: [[C_ADDR:%.*]] = alloca i32*, align 8
8079 // CHECK9-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
8080 // CHECK9-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
8081 // CHECK9-NEXT: store i32* [[B]], i32** [[B_ADDR]], align 8
8082 // CHECK9-NEXT: store i32* [[C]], i32** [[C_ADDR]], align 8
8083 // CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
8084 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i32* [[CONV]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
8085 // CHECK9-NEXT: ret void
8086 //
8087 //
8088 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..30
8089 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
8090 // CHECK9-NEXT: entry:
8091 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
8092 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
8093 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
8094 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i32**, align 8
8095 // CHECK9-NEXT: [[B_ADDR:%.*]] = alloca i32**, align 8
8096 // CHECK9-NEXT: [[C_ADDR:%.*]] = alloca i32**, align 8
8097 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
8098 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
8099 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
8100 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
8101 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
8102 // CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
8103 // CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
8104 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
8105 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
8106 // CHECK9-NEXT: [[I3:%.*]] = alloca i32, align 4
8107 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
8108 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
8109 // CHECK9-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
8110 // CHECK9-NEXT: store i32** [[A]], i32*** [[A_ADDR]], align 8
8111 // CHECK9-NEXT: store i32** [[B]], i32*** [[B_ADDR]], align 8
8112 // CHECK9-NEXT: store i32** [[C]], i32*** [[C_ADDR]], align 8
8113 // CHECK9-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
8114 // CHECK9-NEXT: [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
8115 // CHECK9-NEXT: [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
8116 // CHECK9-NEXT: [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
8117 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
8118 // CHECK9-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
8119 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8120 // CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
8121 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
8122 // CHECK9-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
8123 // CHECK9-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
8124 // CHECK9-NEXT: store i32 0, i32* [[I]], align 4
8125 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8126 // CHECK9-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
8127 // CHECK9-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
8128 // CHECK9: omp.precond.then:
8129 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
8130 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8131 // CHECK9-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
8132 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
8133 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
8134 // CHECK9-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8135 // CHECK9-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
8136 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
8137 // CHECK9-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
8138 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8139 // CHECK9-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
8140 // CHECK9-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
8141 // CHECK9: cond.true:
8142 // CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8143 // CHECK9-NEXT: br label [[COND_END:%.*]]
8144 // CHECK9: cond.false:
8145 // CHECK9-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
8146 // CHECK9-NEXT: br label [[COND_END]]
8147 // CHECK9: cond.end:
8148 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
8149 // CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
8150 // CHECK9-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
8151 // CHECK9-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
8152 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
8153 // CHECK9: omp.inner.for.cond:
8154 // CHECK9-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !68
8155 // CHECK9-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !68
8156 // CHECK9-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
8157 // CHECK9-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8158 // CHECK9: omp.inner.for.body:
8159 // CHECK9-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !68
8160 // CHECK9-NEXT: [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
8161 // CHECK9-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !68
8162 // CHECK9-NEXT: [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
8163 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i32**, i32**, i32**)* @.omp_outlined..31 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], i32** [[TMP1]], i32** [[TMP2]], i32** [[TMP3]]), !llvm.access.group !68
8164 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
8165 // CHECK9: omp.inner.for.inc:
8166 // CHECK9-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !68
8167 // CHECK9-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !68
8168 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
8169 // CHECK9-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !68
8170 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP69:![0-9]+]]
8171 // CHECK9: omp.inner.for.end:
8172 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
8173 // CHECK9: omp.loop.exit:
8174 // CHECK9-NEXT: [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8175 // CHECK9-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
8176 // CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
8177 // CHECK9-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
8178 // CHECK9-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
8179 // CHECK9-NEXT: br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
8180 // CHECK9: .omp.final.then:
8181 // CHECK9-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8182 // CHECK9-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
8183 // CHECK9-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
8184 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
8185 // CHECK9-NEXT: [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
8186 // CHECK9-NEXT: store i32 [[ADD8]], i32* [[I3]], align 4
8187 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
8188 // CHECK9: .omp.final.done:
8189 // CHECK9-NEXT: br label [[OMP_PRECOND_END]]
8190 // CHECK9: omp.precond.end:
8191 // CHECK9-NEXT: ret void
8192 //
8193 //
8194 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..31
8195 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
8196 // CHECK9-NEXT: entry:
8197 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
8198 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
8199 // CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
8200 // CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
8201 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
8202 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i32**, align 8
8203 // CHECK9-NEXT: [[B_ADDR:%.*]] = alloca i32**, align 8
8204 // CHECK9-NEXT: [[C_ADDR:%.*]] = alloca i32**, align 8
8205 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
8206 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
8207 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
8208 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
8209 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
8210 // CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
8211 // CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
8212 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
8213 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
8214 // CHECK9-NEXT: [[I4:%.*]] = alloca i32, align 4
8215 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
8216 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
8217 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
8218 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
8219 // CHECK9-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
8220 // CHECK9-NEXT: store i32** [[A]], i32*** [[A_ADDR]], align 8
8221 // CHECK9-NEXT: store i32** [[B]], i32*** [[B_ADDR]], align 8
8222 // CHECK9-NEXT: store i32** [[C]], i32*** [[C_ADDR]], align 8
8223 // CHECK9-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
8224 // CHECK9-NEXT: [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
8225 // CHECK9-NEXT: [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
8226 // CHECK9-NEXT: [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
8227 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
8228 // CHECK9-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
8229 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8230 // CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
8231 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
8232 // CHECK9-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
8233 // CHECK9-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
8234 // CHECK9-NEXT: store i32 0, i32* [[I]], align 4
8235 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8236 // CHECK9-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
8237 // CHECK9-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
8238 // CHECK9: omp.precond.then:
8239 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
8240 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8241 // CHECK9-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
8242 // CHECK9-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
8243 // CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
8244 // CHECK9-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
8245 // CHECK9-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
8246 // CHECK9-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
8247 // CHECK9-NEXT: store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
8248 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
8249 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
8250 // CHECK9-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8251 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
8252 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
8253 // CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
8254 // CHECK9-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8255 // CHECK9-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
8256 // CHECK9-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
8257 // CHECK9: cond.true:
8258 // CHECK9-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8259 // CHECK9-NEXT: br label [[COND_END:%.*]]
8260 // CHECK9: cond.false:
8261 // CHECK9-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
8262 // CHECK9-NEXT: br label [[COND_END]]
8263 // CHECK9: cond.end:
8264 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
8265 // CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
8266 // CHECK9-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
8267 // CHECK9-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
8268 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
8269 // CHECK9: omp.inner.for.cond:
8270 // CHECK9-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !71
8271 // CHECK9-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !71
8272 // CHECK9-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
8273 // CHECK9-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8274 // CHECK9: omp.inner.for.body:
8275 // CHECK9-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !71
8276 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
8277 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
8278 // CHECK9-NEXT: store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !71
8279 // CHECK9-NEXT: [[TMP20:%.*]] = load i32*, i32** [[TMP2]], align 8, !llvm.access.group !71
8280 // CHECK9-NEXT: [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !71
8281 // CHECK9-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
8282 // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i64 [[IDXPROM]]
8283 // CHECK9-NEXT: [[TMP22:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !71
8284 // CHECK9-NEXT: [[TMP23:%.*]] = load i32*, i32** [[TMP3]], align 8, !llvm.access.group !71
8285 // CHECK9-NEXT: [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !71
8286 // CHECK9-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
8287 // CHECK9-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i64 [[IDXPROM7]]
8288 // CHECK9-NEXT: [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX8]], align 4, !llvm.access.group !71
8289 // CHECK9-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP22]], [[TMP25]]
8290 // CHECK9-NEXT: [[TMP26:%.*]] = load i32*, i32** [[TMP1]], align 8, !llvm.access.group !71
8291 // CHECK9-NEXT: [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !71
8292 // CHECK9-NEXT: [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
8293 // CHECK9-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i64 [[IDXPROM10]]
8294 // CHECK9-NEXT: store i32 [[ADD9]], i32* [[ARRAYIDX11]], align 4, !llvm.access.group !71
8295 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
8296 // CHECK9: omp.body.continue:
8297 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
8298 // CHECK9: omp.inner.for.inc:
8299 // CHECK9-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !71
8300 // CHECK9-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP28]], 1
8301 // CHECK9-NEXT: store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !71
8302 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP72:![0-9]+]]
8303 // CHECK9: omp.inner.for.end:
8304 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
8305 // CHECK9: omp.loop.exit:
8306 // CHECK9-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8307 // CHECK9-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
8308 // CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
8309 // CHECK9-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
8310 // CHECK9-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
8311 // CHECK9-NEXT: br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
8312 // CHECK9: .omp.final.then:
8313 // CHECK9-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8314 // CHECK9-NEXT: [[SUB13:%.*]] = sub nsw i32 [[TMP33]], 0
8315 // CHECK9-NEXT: [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
8316 // CHECK9-NEXT: [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
8317 // CHECK9-NEXT: [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
8318 // CHECK9-NEXT: store i32 [[ADD16]], i32* [[I4]], align 4
8319 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
8320 // CHECK9: .omp.final.done:
8321 // CHECK9-NEXT: br label [[OMP_PRECOND_END]]
8322 // CHECK9: omp.precond.end:
8323 // CHECK9-NEXT: ret void
8324 //
8325 //
8326 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l58
8327 // CHECK9-SAME: (i64 noundef [[CH:%.*]], i64 noundef [[N:%.*]], i32* noundef [[A:%.*]], i32* noundef [[B:%.*]], i32* noundef [[C:%.*]]) #[[ATTR1]] {
8328 // CHECK9-NEXT: entry:
8329 // CHECK9-NEXT: [[CH_ADDR:%.*]] = alloca i64, align 8
8330 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
8331 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
8332 // CHECK9-NEXT: [[B_ADDR:%.*]] = alloca i32*, align 8
8333 // CHECK9-NEXT: [[C_ADDR:%.*]] = alloca i32*, align 8
8334 // CHECK9-NEXT: store i64 [[CH]], i64* [[CH_ADDR]], align 8
8335 // CHECK9-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
8336 // CHECK9-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
8337 // CHECK9-NEXT: store i32* [[B]], i32** [[B_ADDR]], align 8
8338 // CHECK9-NEXT: store i32* [[C]], i32** [[C_ADDR]], align 8
8339 // CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
8340 // CHECK9-NEXT: [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
8341 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..34 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
8342 // CHECK9-NEXT: ret void
8343 //
8344 //
8345 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..34
8346 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
8347 // CHECK9-NEXT: entry:
8348 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
8349 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
8350 // CHECK9-NEXT: [[CH_ADDR:%.*]] = alloca i32*, align 8
8351 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
8352 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i32**, align 8
8353 // CHECK9-NEXT: [[B_ADDR:%.*]] = alloca i32**, align 8
8354 // CHECK9-NEXT: [[C_ADDR:%.*]] = alloca i32**, align 8
8355 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
8356 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
8357 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
8358 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
8359 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
8360 // CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
8361 // CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
8362 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
8363 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
8364 // CHECK9-NEXT: [[I3:%.*]] = alloca i32, align 4
8365 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
8366 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
8367 // CHECK9-NEXT: store i32* [[CH]], i32** [[CH_ADDR]], align 8
8368 // CHECK9-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
8369 // CHECK9-NEXT: store i32** [[A]], i32*** [[A_ADDR]], align 8
8370 // CHECK9-NEXT: store i32** [[B]], i32*** [[B_ADDR]], align 8
8371 // CHECK9-NEXT: store i32** [[C]], i32*** [[C_ADDR]], align 8
8372 // CHECK9-NEXT: [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
8373 // CHECK9-NEXT: [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
8374 // CHECK9-NEXT: [[TMP2:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
8375 // CHECK9-NEXT: [[TMP3:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
8376 // CHECK9-NEXT: [[TMP4:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
8377 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP1]], align 4
8378 // CHECK9-NEXT: store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
8379 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8380 // CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP6]], 0
8381 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
8382 // CHECK9-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
8383 // CHECK9-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
8384 // CHECK9-NEXT: store i32 0, i32* [[I]], align 4
8385 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8386 // CHECK9-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP7]]
8387 // CHECK9-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
8388 // CHECK9: omp.precond.then:
8389 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
8390 // CHECK9-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8391 // CHECK9-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_COMB_UB]], align 4
8392 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
8393 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
8394 // CHECK9-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP0]], align 4
8395 // CHECK9-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8396 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
8397 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP9]])
8398 // CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
8399 // CHECK9-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8400 // CHECK9-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
8401 // CHECK9-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
8402 // CHECK9: cond.true:
8403 // CHECK9-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8404 // CHECK9-NEXT: br label [[COND_END:%.*]]
8405 // CHECK9: cond.false:
8406 // CHECK9-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
8407 // CHECK9-NEXT: br label [[COND_END]]
8408 // CHECK9: cond.end:
8409 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
8410 // CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
8411 // CHECK9-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
8412 // CHECK9-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
8413 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
8414 // CHECK9: omp.inner.for.cond:
8415 // CHECK9-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !74
8416 // CHECK9-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !74
8417 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP18]], 1
8418 // CHECK9-NEXT: [[CMP5:%.*]] = icmp slt i32 [[TMP17]], [[ADD]]
8419 // CHECK9-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8420 // CHECK9: omp.inner.for.body:
8421 // CHECK9-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !74
8422 // CHECK9-NEXT: [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
8423 // CHECK9-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !74
8424 // CHECK9-NEXT: [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
8425 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i32**, i32**, i32**)* @.omp_outlined..35 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], i32** [[TMP2]], i32** [[TMP3]], i32** [[TMP4]]), !llvm.access.group !74
8426 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
8427 // CHECK9: omp.inner.for.inc:
8428 // CHECK9-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !74
8429 // CHECK9-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !74
8430 // CHECK9-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
8431 // CHECK9-NEXT: store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !74
8432 // CHECK9-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !74
8433 // CHECK9-NEXT: [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !74
8434 // CHECK9-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
8435 // CHECK9-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !74
8436 // CHECK9-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !74
8437 // CHECK9-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !74
8438 // CHECK9-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP27]], [[TMP28]]
8439 // CHECK9-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !74
8440 // CHECK9-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !74
8441 // CHECK9-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !74
8442 // CHECK9-NEXT: [[CMP9:%.*]] = icmp sgt i32 [[TMP29]], [[TMP30]]
8443 // CHECK9-NEXT: br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
8444 // CHECK9: cond.true10:
8445 // CHECK9-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !74
8446 // CHECK9-NEXT: br label [[COND_END12:%.*]]
8447 // CHECK9: cond.false11:
8448 // CHECK9-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !74
8449 // CHECK9-NEXT: br label [[COND_END12]]
8450 // CHECK9: cond.end12:
8451 // CHECK9-NEXT: [[COND13:%.*]] = phi i32 [ [[TMP31]], [[COND_TRUE10]] ], [ [[TMP32]], [[COND_FALSE11]] ]
8452 // CHECK9-NEXT: store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !74
8453 // CHECK9-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !74
8454 // CHECK9-NEXT: store i32 [[TMP33]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !74
8455 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP75:![0-9]+]]
8456 // CHECK9: omp.inner.for.end:
8457 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
8458 // CHECK9: omp.loop.exit:
8459 // CHECK9-NEXT: [[TMP34:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8460 // CHECK9-NEXT: [[TMP35:%.*]] = load i32, i32* [[TMP34]], align 4
8461 // CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP35]])
8462 // CHECK9-NEXT: [[TMP36:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
8463 // CHECK9-NEXT: [[TMP37:%.*]] = icmp ne i32 [[TMP36]], 0
8464 // CHECK9-NEXT: br i1 [[TMP37]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
8465 // CHECK9: .omp.final.then:
8466 // CHECK9-NEXT: [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8467 // CHECK9-NEXT: [[SUB14:%.*]] = sub nsw i32 [[TMP38]], 0
8468 // CHECK9-NEXT: [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
8469 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV15]], 1
8470 // CHECK9-NEXT: [[ADD16:%.*]] = add nsw i32 0, [[MUL]]
8471 // CHECK9-NEXT: store i32 [[ADD16]], i32* [[I3]], align 4
8472 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
8473 // CHECK9: .omp.final.done:
8474 // CHECK9-NEXT: br label [[OMP_PRECOND_END]]
8475 // CHECK9: omp.precond.end:
8476 // CHECK9-NEXT: ret void
8477 //
8478 //
8479 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..35
8480 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
8481 // CHECK9-NEXT: entry:
8482 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
8483 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
8484 // CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
8485 // CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
8486 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
8487 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i32**, align 8
8488 // CHECK9-NEXT: [[B_ADDR:%.*]] = alloca i32**, align 8
8489 // CHECK9-NEXT: [[C_ADDR:%.*]] = alloca i32**, align 8
8490 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
8491 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
8492 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
8493 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
8494 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
8495 // CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
8496 // CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
8497 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
8498 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
8499 // CHECK9-NEXT: [[I4:%.*]] = alloca i32, align 4
8500 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
8501 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
8502 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
8503 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
8504 // CHECK9-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
8505 // CHECK9-NEXT: store i32** [[A]], i32*** [[A_ADDR]], align 8
8506 // CHECK9-NEXT: store i32** [[B]], i32*** [[B_ADDR]], align 8
8507 // CHECK9-NEXT: store i32** [[C]], i32*** [[C_ADDR]], align 8
8508 // CHECK9-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
8509 // CHECK9-NEXT: [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
8510 // CHECK9-NEXT: [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
8511 // CHECK9-NEXT: [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
8512 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
8513 // CHECK9-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
8514 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8515 // CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
8516 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
8517 // CHECK9-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
8518 // CHECK9-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
8519 // CHECK9-NEXT: store i32 0, i32* [[I]], align 4
8520 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8521 // CHECK9-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
8522 // CHECK9-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
8523 // CHECK9: omp.precond.then:
8524 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
8525 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8526 // CHECK9-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
8527 // CHECK9-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
8528 // CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
8529 // CHECK9-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
8530 // CHECK9-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
8531 // CHECK9-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
8532 // CHECK9-NEXT: store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
8533 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
8534 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
8535 // CHECK9-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8536 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
8537 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
8538 // CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
8539 // CHECK9-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8540 // CHECK9-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
8541 // CHECK9-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
8542 // CHECK9: cond.true:
8543 // CHECK9-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8544 // CHECK9-NEXT: br label [[COND_END:%.*]]
8545 // CHECK9: cond.false:
8546 // CHECK9-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
8547 // CHECK9-NEXT: br label [[COND_END]]
8548 // CHECK9: cond.end:
8549 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
8550 // CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
8551 // CHECK9-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
8552 // CHECK9-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
8553 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
8554 // CHECK9: omp.inner.for.cond:
8555 // CHECK9-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !77
8556 // CHECK9-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !77
8557 // CHECK9-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
8558 // CHECK9-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8559 // CHECK9: omp.inner.for.body:
8560 // CHECK9-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !77
8561 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
8562 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
8563 // CHECK9-NEXT: store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !77
8564 // CHECK9-NEXT: [[TMP20:%.*]] = load i32*, i32** [[TMP2]], align 8, !llvm.access.group !77
8565 // CHECK9-NEXT: [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !77
8566 // CHECK9-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
8567 // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i64 [[IDXPROM]]
8568 // CHECK9-NEXT: [[TMP22:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !77
8569 // CHECK9-NEXT: [[TMP23:%.*]] = load i32*, i32** [[TMP3]], align 8, !llvm.access.group !77
8570 // CHECK9-NEXT: [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !77
8571 // CHECK9-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
8572 // CHECK9-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i64 [[IDXPROM7]]
8573 // CHECK9-NEXT: [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX8]], align 4, !llvm.access.group !77
8574 // CHECK9-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP22]], [[TMP25]]
8575 // CHECK9-NEXT: [[TMP26:%.*]] = load i32*, i32** [[TMP1]], align 8, !llvm.access.group !77
8576 // CHECK9-NEXT: [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !77
8577 // CHECK9-NEXT: [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
8578 // CHECK9-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i64 [[IDXPROM10]]
8579 // CHECK9-NEXT: store i32 [[ADD9]], i32* [[ARRAYIDX11]], align 4, !llvm.access.group !77
8580 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
8581 // CHECK9: omp.body.continue:
8582 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
8583 // CHECK9: omp.inner.for.inc:
8584 // CHECK9-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !77
8585 // CHECK9-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP28]], 1
8586 // CHECK9-NEXT: store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !77
8587 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP78:![0-9]+]]
8588 // CHECK9: omp.inner.for.end:
8589 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
8590 // CHECK9: omp.loop.exit:
8591 // CHECK9-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8592 // CHECK9-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
8593 // CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
8594 // CHECK9-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
8595 // CHECK9-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
8596 // CHECK9-NEXT: br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
8597 // CHECK9: .omp.final.then:
8598 // CHECK9-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8599 // CHECK9-NEXT: [[SUB13:%.*]] = sub nsw i32 [[TMP33]], 0
8600 // CHECK9-NEXT: [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
8601 // CHECK9-NEXT: [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
8602 // CHECK9-NEXT: [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
8603 // CHECK9-NEXT: store i32 [[ADD16]], i32* [[I4]], align 4
8604 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
8605 // CHECK9: .omp.final.done:
8606 // CHECK9-NEXT: br label [[OMP_PRECOND_END]]
8607 // CHECK9: omp.precond.end:
8608 // CHECK9-NEXT: ret void
8609 //
8610 //
8611 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l66
8612 // CHECK9-SAME: (i64 noundef [[N:%.*]], i32* noundef [[A:%.*]], i32* noundef [[B:%.*]], i32* noundef [[C:%.*]]) #[[ATTR1]] {
8613 // CHECK9-NEXT: entry:
8614 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
8615 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
8616 // CHECK9-NEXT: [[B_ADDR:%.*]] = alloca i32*, align 8
8617 // CHECK9-NEXT: [[C_ADDR:%.*]] = alloca i32*, align 8
8618 // CHECK9-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
8619 // CHECK9-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
8620 // CHECK9-NEXT: store i32* [[B]], i32** [[B_ADDR]], align 8
8621 // CHECK9-NEXT: store i32* [[C]], i32** [[C_ADDR]], align 8
8622 // CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
8623 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..38 to void (i32*, i32*, ...)*), i32* [[CONV]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
8624 // CHECK9-NEXT: ret void
8625 //
8626 //
8627 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..38
8628 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
8629 // CHECK9-NEXT: entry:
8630 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
8631 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
8632 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
8633 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i32**, align 8
8634 // CHECK9-NEXT: [[B_ADDR:%.*]] = alloca i32**, align 8
8635 // CHECK9-NEXT: [[C_ADDR:%.*]] = alloca i32**, align 8
8636 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
8637 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
8638 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
8639 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
8640 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
8641 // CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
8642 // CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
8643 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
8644 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
8645 // CHECK9-NEXT: [[I3:%.*]] = alloca i32, align 4
8646 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
8647 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
8648 // CHECK9-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
8649 // CHECK9-NEXT: store i32** [[A]], i32*** [[A_ADDR]], align 8
8650 // CHECK9-NEXT: store i32** [[B]], i32*** [[B_ADDR]], align 8
8651 // CHECK9-NEXT: store i32** [[C]], i32*** [[C_ADDR]], align 8
8652 // CHECK9-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
8653 // CHECK9-NEXT: [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
8654 // CHECK9-NEXT: [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
8655 // CHECK9-NEXT: [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
8656 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
8657 // CHECK9-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
8658 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8659 // CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
8660 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
8661 // CHECK9-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
8662 // CHECK9-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
8663 // CHECK9-NEXT: store i32 0, i32* [[I]], align 4
8664 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8665 // CHECK9-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
8666 // CHECK9-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
8667 // CHECK9: omp.precond.then:
8668 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
8669 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8670 // CHECK9-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
8671 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
8672 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
8673 // CHECK9-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8674 // CHECK9-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
8675 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
8676 // CHECK9-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
8677 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8678 // CHECK9-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
8679 // CHECK9-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
8680 // CHECK9: cond.true:
8681 // CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8682 // CHECK9-NEXT: br label [[COND_END:%.*]]
8683 // CHECK9: cond.false:
8684 // CHECK9-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
8685 // CHECK9-NEXT: br label [[COND_END]]
8686 // CHECK9: cond.end:
8687 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
8688 // CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
8689 // CHECK9-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
8690 // CHECK9-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
8691 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
8692 // CHECK9: omp.inner.for.cond:
8693 // CHECK9-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !80
8694 // CHECK9-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !80
8695 // CHECK9-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
8696 // CHECK9-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8697 // CHECK9: omp.inner.for.body:
8698 // CHECK9-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !80
8699 // CHECK9-NEXT: [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
8700 // CHECK9-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !80
8701 // CHECK9-NEXT: [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
8702 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i32**, i32**, i32**)* @.omp_outlined..39 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], i32** [[TMP1]], i32** [[TMP2]], i32** [[TMP3]]), !llvm.access.group !80
8703 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
8704 // CHECK9: omp.inner.for.inc:
8705 // CHECK9-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !80
8706 // CHECK9-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !80
8707 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
8708 // CHECK9-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !80
8709 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP81:![0-9]+]]
8710 // CHECK9: omp.inner.for.end:
8711 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
8712 // CHECK9: omp.loop.exit:
8713 // CHECK9-NEXT: [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8714 // CHECK9-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
8715 // CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
8716 // CHECK9-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
8717 // CHECK9-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
8718 // CHECK9-NEXT: br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
8719 // CHECK9: .omp.final.then:
8720 // CHECK9-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8721 // CHECK9-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
8722 // CHECK9-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
8723 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
8724 // CHECK9-NEXT: [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
8725 // CHECK9-NEXT: store i32 [[ADD8]], i32* [[I3]], align 4
8726 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
8727 // CHECK9: .omp.final.done:
8728 // CHECK9-NEXT: br label [[OMP_PRECOND_END]]
8729 // CHECK9: omp.precond.end:
8730 // CHECK9-NEXT: ret void
8731 //
8732 //
8733 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..39
8734 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
8735 // CHECK9-NEXT: entry:
8736 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
8737 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
8738 // CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
8739 // CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
8740 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
8741 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i32**, align 8
8742 // CHECK9-NEXT: [[B_ADDR:%.*]] = alloca i32**, align 8
8743 // CHECK9-NEXT: [[C_ADDR:%.*]] = alloca i32**, align 8
8744 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
8745 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
8746 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
8747 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
8748 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
8749 // CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
8750 // CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
8751 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
8752 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
8753 // CHECK9-NEXT: [[I4:%.*]] = alloca i32, align 4
8754 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
8755 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
8756 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
8757 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
8758 // CHECK9-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
8759 // CHECK9-NEXT: store i32** [[A]], i32*** [[A_ADDR]], align 8
8760 // CHECK9-NEXT: store i32** [[B]], i32*** [[B_ADDR]], align 8
8761 // CHECK9-NEXT: store i32** [[C]], i32*** [[C_ADDR]], align 8
8762 // CHECK9-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
8763 // CHECK9-NEXT: [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
8764 // CHECK9-NEXT: [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
8765 // CHECK9-NEXT: [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
8766 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
8767 // CHECK9-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
8768 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8769 // CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
8770 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
8771 // CHECK9-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
8772 // CHECK9-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
8773 // CHECK9-NEXT: store i32 0, i32* [[I]], align 4
8774 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8775 // CHECK9-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
8776 // CHECK9-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
8777 // CHECK9: omp.precond.then:
8778 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
8779 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8780 // CHECK9-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
8781 // CHECK9-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
8782 // CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
8783 // CHECK9-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
8784 // CHECK9-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
8785 // CHECK9-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
8786 // CHECK9-NEXT: store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
8787 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
8788 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
8789 // CHECK9-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8790 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
8791 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
8792 // CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
8793 // CHECK9-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8794 // CHECK9-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
8795 // CHECK9-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
8796 // CHECK9: cond.true:
8797 // CHECK9-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8798 // CHECK9-NEXT: br label [[COND_END:%.*]]
8799 // CHECK9: cond.false:
8800 // CHECK9-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
8801 // CHECK9-NEXT: br label [[COND_END]]
8802 // CHECK9: cond.end:
8803 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
8804 // CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
8805 // CHECK9-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
8806 // CHECK9-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
8807 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
8808 // CHECK9: omp.inner.for.cond:
8809 // CHECK9-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !83
8810 // CHECK9-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !83
8811 // CHECK9-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
8812 // CHECK9-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8813 // CHECK9: omp.inner.for.body:
8814 // CHECK9-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !83
8815 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
8816 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
8817 // CHECK9-NEXT: store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !83
8818 // CHECK9-NEXT: [[TMP20:%.*]] = load i32*, i32** [[TMP2]], align 8, !llvm.access.group !83
8819 // CHECK9-NEXT: [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !83
8820 // CHECK9-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
8821 // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i64 [[IDXPROM]]
8822 // CHECK9-NEXT: [[TMP22:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !83
8823 // CHECK9-NEXT: [[TMP23:%.*]] = load i32*, i32** [[TMP3]], align 8, !llvm.access.group !83
8824 // CHECK9-NEXT: [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !83
8825 // CHECK9-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
8826 // CHECK9-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i64 [[IDXPROM7]]
8827 // CHECK9-NEXT: [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX8]], align 4, !llvm.access.group !83
8828 // CHECK9-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP22]], [[TMP25]]
8829 // CHECK9-NEXT: [[TMP26:%.*]] = load i32*, i32** [[TMP1]], align 8, !llvm.access.group !83
8830 // CHECK9-NEXT: [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !83
8831 // CHECK9-NEXT: [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
8832 // CHECK9-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i64 [[IDXPROM10]]
8833 // CHECK9-NEXT: store i32 [[ADD9]], i32* [[ARRAYIDX11]], align 4, !llvm.access.group !83
8834 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
8835 // CHECK9: omp.body.continue:
8836 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
8837 // CHECK9: omp.inner.for.inc:
8838 // CHECK9-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !83
8839 // CHECK9-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP28]], 1
8840 // CHECK9-NEXT: store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !83
8841 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP84:![0-9]+]]
8842 // CHECK9: omp.inner.for.end:
8843 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
8844 // CHECK9: omp.loop.exit:
8845 // CHECK9-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8846 // CHECK9-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
8847 // CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
8848 // CHECK9-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
8849 // CHECK9-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
8850 // CHECK9-NEXT: br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
8851 // CHECK9: .omp.final.then:
8852 // CHECK9-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8853 // CHECK9-NEXT: [[SUB13:%.*]] = sub nsw i32 [[TMP33]], 0
8854 // CHECK9-NEXT: [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
8855 // CHECK9-NEXT: [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
8856 // CHECK9-NEXT: [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
8857 // CHECK9-NEXT: store i32 [[ADD16]], i32* [[I4]], align 4
8858 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
8859 // CHECK9: .omp.final.done:
8860 // CHECK9-NEXT: br label [[OMP_PRECOND_END]]
8861 // CHECK9: omp.precond.end:
8862 // CHECK9-NEXT: ret void
8863 //
8864 //
8865 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l74
8866 // CHECK9-SAME: (i64 noundef [[CH:%.*]], i64 noundef [[N:%.*]], i32* noundef [[A:%.*]], i32* noundef [[B:%.*]], i32* noundef [[C:%.*]]) #[[ATTR1]] {
8867 // CHECK9-NEXT: entry:
8868 // CHECK9-NEXT: [[CH_ADDR:%.*]] = alloca i64, align 8
8869 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
8870 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
8871 // CHECK9-NEXT: [[B_ADDR:%.*]] = alloca i32*, align 8
8872 // CHECK9-NEXT: [[C_ADDR:%.*]] = alloca i32*, align 8
8873 // CHECK9-NEXT: store i64 [[CH]], i64* [[CH_ADDR]], align 8
8874 // CHECK9-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
8875 // CHECK9-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
8876 // CHECK9-NEXT: store i32* [[B]], i32** [[B_ADDR]], align 8
8877 // CHECK9-NEXT: store i32* [[C]], i32** [[C_ADDR]], align 8
8878 // CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
8879 // CHECK9-NEXT: [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
8880 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..42 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
8881 // CHECK9-NEXT: ret void
8882 //
8883 //
8884 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..42
8885 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
8886 // CHECK9-NEXT: entry:
8887 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
8888 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
8889 // CHECK9-NEXT: [[CH_ADDR:%.*]] = alloca i32*, align 8
8890 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
8891 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i32**, align 8
8892 // CHECK9-NEXT: [[B_ADDR:%.*]] = alloca i32**, align 8
8893 // CHECK9-NEXT: [[C_ADDR:%.*]] = alloca i32**, align 8
8894 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
8895 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
8896 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
8897 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
8898 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
8899 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
8900 // CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
8901 // CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
8902 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
8903 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
8904 // CHECK9-NEXT: [[I4:%.*]] = alloca i32, align 4
8905 // CHECK9-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
8906 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
8907 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
8908 // CHECK9-NEXT: store i32* [[CH]], i32** [[CH_ADDR]], align 8
8909 // CHECK9-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
8910 // CHECK9-NEXT: store i32** [[A]], i32*** [[A_ADDR]], align 8
8911 // CHECK9-NEXT: store i32** [[B]], i32*** [[B_ADDR]], align 8
8912 // CHECK9-NEXT: store i32** [[C]], i32*** [[C_ADDR]], align 8
8913 // CHECK9-NEXT: [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
8914 // CHECK9-NEXT: [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
8915 // CHECK9-NEXT: [[TMP2:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
8916 // CHECK9-NEXT: [[TMP3:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
8917 // CHECK9-NEXT: [[TMP4:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
8918 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
8919 // CHECK9-NEXT: store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
8920 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
8921 // CHECK9-NEXT: store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
8922 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8923 // CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
8924 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
8925 // CHECK9-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
8926 // CHECK9-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
8927 // CHECK9-NEXT: store i32 0, i32* [[I]], align 4
8928 // CHECK9-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8929 // CHECK9-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
8930 // CHECK9-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
8931 // CHECK9: omp.precond.then:
8932 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
8933 // CHECK9-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
8934 // CHECK9-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
8935 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
8936 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
8937 // CHECK9-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8938 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
8939 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
8940 // CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
8941 // CHECK9-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
8942 // CHECK9-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
8943 // CHECK9-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
8944 // CHECK9: cond.true:
8945 // CHECK9-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
8946 // CHECK9-NEXT: br label [[COND_END:%.*]]
8947 // CHECK9: cond.false:
8948 // CHECK9-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
8949 // CHECK9-NEXT: br label [[COND_END]]
8950 // CHECK9: cond.end:
8951 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
8952 // CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
8953 // CHECK9-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
8954 // CHECK9-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
8955 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
8956 // CHECK9: omp.inner.for.cond:
8957 // CHECK9-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !86
8958 // CHECK9-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !86
8959 // CHECK9-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
8960 // CHECK9-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8961 // CHECK9: omp.inner.for.body:
8962 // CHECK9-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !86
8963 // CHECK9-NEXT: [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
8964 // CHECK9-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !86
8965 // CHECK9-NEXT: [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
8966 // CHECK9-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !86
8967 // CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
8968 // CHECK9-NEXT: store i32 [[TMP23]], i32* [[CONV]], align 4, !llvm.access.group !86
8969 // CHECK9-NEXT: [[TMP24:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !86
8970 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i32**, i32**, i32**, i64)* @.omp_outlined..43 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], i32** [[TMP2]], i32** [[TMP3]], i32** [[TMP4]], i64 [[TMP24]]), !llvm.access.group !86
8971 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
8972 // CHECK9: omp.inner.for.inc:
8973 // CHECK9-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !86
8974 // CHECK9-NEXT: [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !86
8975 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
8976 // CHECK9-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !86
8977 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP87:![0-9]+]]
8978 // CHECK9: omp.inner.for.end:
8979 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
8980 // CHECK9: omp.loop.exit:
8981 // CHECK9-NEXT: [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8982 // CHECK9-NEXT: [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
8983 // CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP28]])
8984 // CHECK9-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
8985 // CHECK9-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
8986 // CHECK9-NEXT: br i1 [[TMP30]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
8987 // CHECK9: .omp.final.then:
8988 // CHECK9-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8989 // CHECK9-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP31]], 0
8990 // CHECK9-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
8991 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
8992 // CHECK9-NEXT: [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
8993 // CHECK9-NEXT: store i32 [[ADD9]], i32* [[I4]], align 4
8994 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
8995 // CHECK9: .omp.final.done:
8996 // CHECK9-NEXT: br label [[OMP_PRECOND_END]]
8997 // CHECK9: omp.precond.end:
8998 // CHECK9-NEXT: ret void
8999 //
9000 //
9001 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..43
9002 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[C:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
9003 // CHECK9-NEXT: entry:
9004 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
9005 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
9006 // CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
9007 // CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
9008 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
9009 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i32**, align 8
9010 // CHECK9-NEXT: [[B_ADDR:%.*]] = alloca i32**, align 8
9011 // CHECK9-NEXT: [[C_ADDR:%.*]] = alloca i32**, align 8
9012 // CHECK9-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
9013 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
9014 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
9015 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
9016 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
9017 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
9018 // CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
9019 // CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
9020 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
9021 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
9022 // CHECK9-NEXT: [[I6:%.*]] = alloca i32, align 4
9023 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
9024 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
9025 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
9026 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
9027 // CHECK9-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
9028 // CHECK9-NEXT: store i32** [[A]], i32*** [[A_ADDR]], align 8
9029 // CHECK9-NEXT: store i32** [[B]], i32*** [[B_ADDR]], align 8
9030 // CHECK9-NEXT: store i32** [[C]], i32*** [[C_ADDR]], align 8
9031 // CHECK9-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
9032 // CHECK9-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
9033 // CHECK9-NEXT: [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
9034 // CHECK9-NEXT: [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
9035 // CHECK9-NEXT: [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
9036 // CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
9037 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
9038 // CHECK9-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
9039 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9040 // CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
9041 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
9042 // CHECK9-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
9043 // CHECK9-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
9044 // CHECK9-NEXT: store i32 0, i32* [[I]], align 4
9045 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9046 // CHECK9-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
9047 // CHECK9-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
9048 // CHECK9: omp.precond.then:
9049 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
9050 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
9051 // CHECK9-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
9052 // CHECK9-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
9053 // CHECK9-NEXT: [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
9054 // CHECK9-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
9055 // CHECK9-NEXT: [[CONV5:%.*]] = trunc i64 [[TMP9]] to i32
9056 // CHECK9-NEXT: store i32 [[CONV4]], i32* [[DOTOMP_LB]], align 4
9057 // CHECK9-NEXT: store i32 [[CONV5]], i32* [[DOTOMP_UB]], align 4
9058 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
9059 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
9060 // CHECK9-NEXT: [[TMP10:%.*]] = load i32, i32* [[CONV]], align 4
9061 // CHECK9-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9062 // CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
9063 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP12]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP10]])
9064 // CHECK9-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
9065 // CHECK9: omp.dispatch.cond:
9066 // CHECK9-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
9067 // CHECK9-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
9068 // CHECK9-NEXT: [[CONV7:%.*]] = trunc i64 [[TMP14]] to i32
9069 // CHECK9-NEXT: [[CMP8:%.*]] = icmp sgt i32 [[TMP13]], [[CONV7]]
9070 // CHECK9-NEXT: br i1 [[CMP8]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
9071 // CHECK9: cond.true:
9072 // CHECK9-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
9073 // CHECK9-NEXT: [[CONV9:%.*]] = trunc i64 [[TMP15]] to i32
9074 // CHECK9-NEXT: br label [[COND_END:%.*]]
9075 // CHECK9: cond.false:
9076 // CHECK9-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
9077 // CHECK9-NEXT: br label [[COND_END]]
9078 // CHECK9: cond.end:
9079 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ [[CONV9]], [[COND_TRUE]] ], [ [[TMP16]], [[COND_FALSE]] ]
9080 // CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
9081 // CHECK9-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
9082 // CHECK9-NEXT: store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
9083 // CHECK9-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
9084 // CHECK9-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
9085 // CHECK9-NEXT: [[CMP10:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
9086 // CHECK9-NEXT: br i1 [[CMP10]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
9087 // CHECK9: omp.dispatch.body:
9088 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
9089 // CHECK9: omp.inner.for.cond:
9090 // CHECK9-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !89
9091 // CHECK9-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !89
9092 // CHECK9-NEXT: [[CMP11:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
9093 // CHECK9-NEXT: br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
9094 // CHECK9: omp.inner.for.body:
9095 // CHECK9-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !89
9096 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP22]], 1
9097 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
9098 // CHECK9-NEXT: store i32 [[ADD]], i32* [[I6]], align 4, !llvm.access.group !89
9099 // CHECK9-NEXT: [[TMP23:%.*]] = load i32*, i32** [[TMP2]], align 8, !llvm.access.group !89
9100 // CHECK9-NEXT: [[TMP24:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !89
9101 // CHECK9-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP24]] to i64
9102 // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i64 [[IDXPROM]]
9103 // CHECK9-NEXT: [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !89
9104 // CHECK9-NEXT: [[TMP26:%.*]] = load i32*, i32** [[TMP3]], align 8, !llvm.access.group !89
9105 // CHECK9-NEXT: [[TMP27:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !89
9106 // CHECK9-NEXT: [[IDXPROM12:%.*]] = sext i32 [[TMP27]] to i64
9107 // CHECK9-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i64 [[IDXPROM12]]
9108 // CHECK9-NEXT: [[TMP28:%.*]] = load i32, i32* [[ARRAYIDX13]], align 4, !llvm.access.group !89
9109 // CHECK9-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP25]], [[TMP28]]
9110 // CHECK9-NEXT: [[TMP29:%.*]] = load i32*, i32** [[TMP1]], align 8, !llvm.access.group !89
9111 // CHECK9-NEXT: [[TMP30:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !89
9112 // CHECK9-NEXT: [[IDXPROM15:%.*]] = sext i32 [[TMP30]] to i64
9113 // CHECK9-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds i32, i32* [[TMP29]], i64 [[IDXPROM15]]
9114 // CHECK9-NEXT: store i32 [[ADD14]], i32* [[ARRAYIDX16]], align 4, !llvm.access.group !89
9115 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
9116 // CHECK9: omp.body.continue:
9117 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
9118 // CHECK9: omp.inner.for.inc:
9119 // CHECK9-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !89
9120 // CHECK9-NEXT: [[ADD17:%.*]] = add nsw i32 [[TMP31]], 1
9121 // CHECK9-NEXT: store i32 [[ADD17]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !89
9122 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP90:![0-9]+]]
9123 // CHECK9: omp.inner.for.end:
9124 // CHECK9-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
9125 // CHECK9: omp.dispatch.inc:
9126 // CHECK9-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
9127 // CHECK9-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
9128 // CHECK9-NEXT: [[ADD18:%.*]] = add nsw i32 [[TMP32]], [[TMP33]]
9129 // CHECK9-NEXT: store i32 [[ADD18]], i32* [[DOTOMP_LB]], align 4
9130 // CHECK9-NEXT: [[TMP34:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
9131 // CHECK9-NEXT: [[TMP35:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
9132 // CHECK9-NEXT: [[ADD19:%.*]] = add nsw i32 [[TMP34]], [[TMP35]]
9133 // CHECK9-NEXT: store i32 [[ADD19]], i32* [[DOTOMP_UB]], align 4
9134 // CHECK9-NEXT: br label [[OMP_DISPATCH_COND]]
9135 // CHECK9: omp.dispatch.end:
9136 // CHECK9-NEXT: [[TMP36:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9137 // CHECK9-NEXT: [[TMP37:%.*]] = load i32, i32* [[TMP36]], align 4
9138 // CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP37]])
9139 // CHECK9-NEXT: [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
9140 // CHECK9-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
9141 // CHECK9-NEXT: br i1 [[TMP39]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
9142 // CHECK9: .omp.final.then:
9143 // CHECK9-NEXT: [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9144 // CHECK9-NEXT: [[SUB20:%.*]] = sub nsw i32 [[TMP40]], 0
9145 // CHECK9-NEXT: [[DIV21:%.*]] = sdiv i32 [[SUB20]], 1
9146 // CHECK9-NEXT: [[MUL22:%.*]] = mul nsw i32 [[DIV21]], 1
9147 // CHECK9-NEXT: [[ADD23:%.*]] = add nsw i32 0, [[MUL22]]
9148 // CHECK9-NEXT: store i32 [[ADD23]], i32* [[I6]], align 4
9149 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
9150 // CHECK9: .omp.final.done:
9151 // CHECK9-NEXT: br label [[OMP_PRECOND_END]]
9152 // CHECK9: omp.precond.end:
9153 // CHECK9-NEXT: ret void
9154 //
9155 //
9156 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l82
9157 // CHECK9-SAME: (i64 noundef [[N:%.*]], i32* noundef [[A:%.*]], i32* noundef [[B:%.*]], i32* noundef [[C:%.*]]) #[[ATTR1]] {
9158 // CHECK9-NEXT: entry:
9159 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
9160 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
9161 // CHECK9-NEXT: [[B_ADDR:%.*]] = alloca i32*, align 8
9162 // CHECK9-NEXT: [[C_ADDR:%.*]] = alloca i32*, align 8
9163 // CHECK9-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
9164 // CHECK9-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
9165 // CHECK9-NEXT: store i32* [[B]], i32** [[B_ADDR]], align 8
9166 // CHECK9-NEXT: store i32* [[C]], i32** [[C_ADDR]], align 8
9167 // CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
9168 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..46 to void (i32*, i32*, ...)*), i32* [[CONV]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
9169 // CHECK9-NEXT: ret void
9170 //
9171 //
9172 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..46
9173 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
9174 // CHECK9-NEXT: entry:
9175 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
9176 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
9177 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
9178 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i32**, align 8
9179 // CHECK9-NEXT: [[B_ADDR:%.*]] = alloca i32**, align 8
9180 // CHECK9-NEXT: [[C_ADDR:%.*]] = alloca i32**, align 8
9181 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
9182 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
9183 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
9184 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
9185 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
9186 // CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
9187 // CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
9188 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
9189 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
9190 // CHECK9-NEXT: [[I3:%.*]] = alloca i32, align 4
9191 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
9192 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
9193 // CHECK9-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
9194 // CHECK9-NEXT: store i32** [[A]], i32*** [[A_ADDR]], align 8
9195 // CHECK9-NEXT: store i32** [[B]], i32*** [[B_ADDR]], align 8
9196 // CHECK9-NEXT: store i32** [[C]], i32*** [[C_ADDR]], align 8
9197 // CHECK9-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
9198 // CHECK9-NEXT: [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
9199 // CHECK9-NEXT: [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
9200 // CHECK9-NEXT: [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
9201 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
9202 // CHECK9-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
9203 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9204 // CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
9205 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
9206 // CHECK9-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
9207 // CHECK9-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
9208 // CHECK9-NEXT: store i32 0, i32* [[I]], align 4
9209 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9210 // CHECK9-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
9211 // CHECK9-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
9212 // CHECK9: omp.precond.then:
9213 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
9214 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9215 // CHECK9-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
9216 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
9217 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
9218 // CHECK9-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9219 // CHECK9-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
9220 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
9221 // CHECK9-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
9222 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9223 // CHECK9-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
9224 // CHECK9-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
9225 // CHECK9: cond.true:
9226 // CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9227 // CHECK9-NEXT: br label [[COND_END:%.*]]
9228 // CHECK9: cond.false:
9229 // CHECK9-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
9230 // CHECK9-NEXT: br label [[COND_END]]
9231 // CHECK9: cond.end:
9232 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
9233 // CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
9234 // CHECK9-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
9235 // CHECK9-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
9236 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
9237 // CHECK9: omp.inner.for.cond:
9238 // CHECK9-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !92
9239 // CHECK9-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !92
9240 // CHECK9-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
9241 // CHECK9-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
9242 // CHECK9: omp.inner.for.body:
9243 // CHECK9-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !92
9244 // CHECK9-NEXT: [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
9245 // CHECK9-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !92
9246 // CHECK9-NEXT: [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
9247 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i32**, i32**, i32**)* @.omp_outlined..47 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], i32** [[TMP1]], i32** [[TMP2]], i32** [[TMP3]]), !llvm.access.group !92
9248 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
9249 // CHECK9: omp.inner.for.inc:
9250 // CHECK9-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !92
9251 // CHECK9-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !92
9252 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
9253 // CHECK9-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !92
9254 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP93:![0-9]+]]
9255 // CHECK9: omp.inner.for.end:
9256 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
9257 // CHECK9: omp.loop.exit:
9258 // CHECK9-NEXT: [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9259 // CHECK9-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
9260 // CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
9261 // CHECK9-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
9262 // CHECK9-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
9263 // CHECK9-NEXT: br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
9264 // CHECK9: .omp.final.then:
9265 // CHECK9-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9266 // CHECK9-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
9267 // CHECK9-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
9268 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
9269 // CHECK9-NEXT: [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
9270 // CHECK9-NEXT: store i32 [[ADD8]], i32* [[I3]], align 4
9271 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
9272 // CHECK9: .omp.final.done:
9273 // CHECK9-NEXT: br label [[OMP_PRECOND_END]]
9274 // CHECK9: omp.precond.end:
9275 // CHECK9-NEXT: ret void
9276 //
9277 //
9278 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..47
9279 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
9280 // CHECK9-NEXT: entry:
9281 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
9282 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
9283 // CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
9284 // CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
9285 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
9286 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i32**, align 8
9287 // CHECK9-NEXT: [[B_ADDR:%.*]] = alloca i32**, align 8
9288 // CHECK9-NEXT: [[C_ADDR:%.*]] = alloca i32**, align 8
9289 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
9290 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
9291 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
9292 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
9293 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
9294 // CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
9295 // CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
9296 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
9297 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
9298 // CHECK9-NEXT: [[I4:%.*]] = alloca i32, align 4
9299 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
9300 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
9301 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
9302 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
9303 // CHECK9-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
9304 // CHECK9-NEXT: store i32** [[A]], i32*** [[A_ADDR]], align 8
9305 // CHECK9-NEXT: store i32** [[B]], i32*** [[B_ADDR]], align 8
9306 // CHECK9-NEXT: store i32** [[C]], i32*** [[C_ADDR]], align 8
9307 // CHECK9-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
9308 // CHECK9-NEXT: [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
9309 // CHECK9-NEXT: [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
9310 // CHECK9-NEXT: [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
9311 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
9312 // CHECK9-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
9313 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9314 // CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
9315 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
9316 // CHECK9-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
9317 // CHECK9-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
9318 // CHECK9-NEXT: store i32 0, i32* [[I]], align 4
9319 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9320 // CHECK9-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
9321 // CHECK9-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
9322 // CHECK9: omp.precond.then:
9323 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
9324 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9325 // CHECK9-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
9326 // CHECK9-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
9327 // CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
9328 // CHECK9-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
9329 // CHECK9-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
9330 // CHECK9-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
9331 // CHECK9-NEXT: store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
9332 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
9333 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
9334 // CHECK9-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
9335 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
9336 // CHECK9-NEXT: [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9337 // CHECK9-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
9338 // CHECK9-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32 35, i32 [[TMP10]], i32 [[TMP11]], i32 1, i32 1)
9339 // CHECK9-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
9340 // CHECK9: omp.dispatch.cond:
9341 // CHECK9-NEXT: [[TMP14:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9342 // CHECK9-NEXT: [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4
9343 // CHECK9-NEXT: [[TMP16:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP15]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
9344 // CHECK9-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP16]], 0
9345 // CHECK9-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
9346 // CHECK9: omp.dispatch.body:
9347 // CHECK9-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
9348 // CHECK9-NEXT: store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
9349 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
9350 // CHECK9: omp.inner.for.cond:
9351 // CHECK9-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !95
9352 // CHECK9-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !95
9353 // CHECK9-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
9354 // CHECK9-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
9355 // CHECK9: omp.inner.for.body:
9356 // CHECK9-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !95
9357 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP20]], 1
9358 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
9359 // CHECK9-NEXT: store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !95
9360 // CHECK9-NEXT: [[TMP21:%.*]] = load i32*, i32** [[TMP2]], align 8, !llvm.access.group !95
9361 // CHECK9-NEXT: [[TMP22:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !95
9362 // CHECK9-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP22]] to i64
9363 // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP21]], i64 [[IDXPROM]]
9364 // CHECK9-NEXT: [[TMP23:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !95
9365 // CHECK9-NEXT: [[TMP24:%.*]] = load i32*, i32** [[TMP3]], align 8, !llvm.access.group !95
9366 // CHECK9-NEXT: [[TMP25:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !95
9367 // CHECK9-NEXT: [[IDXPROM6:%.*]] = sext i32 [[TMP25]] to i64
9368 // CHECK9-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i32, i32* [[TMP24]], i64 [[IDXPROM6]]
9369 // CHECK9-NEXT: [[TMP26:%.*]] = load i32, i32* [[ARRAYIDX7]], align 4, !llvm.access.group !95
9370 // CHECK9-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP23]], [[TMP26]]
9371 // CHECK9-NEXT: [[TMP27:%.*]] = load i32*, i32** [[TMP1]], align 8, !llvm.access.group !95
9372 // CHECK9-NEXT: [[TMP28:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !95
9373 // CHECK9-NEXT: [[IDXPROM9:%.*]] = sext i32 [[TMP28]] to i64
9374 // CHECK9-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds i32, i32* [[TMP27]], i64 [[IDXPROM9]]
9375 // CHECK9-NEXT: store i32 [[ADD8]], i32* [[ARRAYIDX10]], align 4, !llvm.access.group !95
9376 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
9377 // CHECK9: omp.body.continue:
9378 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
9379 // CHECK9: omp.inner.for.inc:
9380 // CHECK9-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !95
9381 // CHECK9-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP29]], 1
9382 // CHECK9-NEXT: store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !95
9383 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP96:![0-9]+]]
9384 // CHECK9: omp.inner.for.end:
9385 // CHECK9-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
9386 // CHECK9: omp.dispatch.inc:
9387 // CHECK9-NEXT: br label [[OMP_DISPATCH_COND]]
9388 // CHECK9: omp.dispatch.end:
9389 // CHECK9-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
9390 // CHECK9-NEXT: [[TMP31:%.*]] = icmp ne i32 [[TMP30]], 0
9391 // CHECK9-NEXT: br i1 [[TMP31]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
9392 // CHECK9: .omp.final.then:
9393 // CHECK9-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9394 // CHECK9-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP32]], 0
9395 // CHECK9-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1
9396 // CHECK9-NEXT: [[MUL14:%.*]] = mul nsw i32 [[DIV13]], 1
9397 // CHECK9-NEXT: [[ADD15:%.*]] = add nsw i32 0, [[MUL14]]
9398 // CHECK9-NEXT: store i32 [[ADD15]], i32* [[I4]], align 4
9399 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
9400 // CHECK9: .omp.final.done:
9401 // CHECK9-NEXT: br label [[OMP_PRECOND_END]]
9402 // CHECK9: omp.precond.end:
9403 // CHECK9-NEXT: ret void
9404 //
9405 //
9406 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l90
9407 // CHECK9-SAME: (i64 noundef [[CH:%.*]], i64 noundef [[N:%.*]], i32* noundef [[A:%.*]], i32* noundef [[B:%.*]], i32* noundef [[C:%.*]]) #[[ATTR1]] {
9408 // CHECK9-NEXT: entry:
9409 // CHECK9-NEXT: [[CH_ADDR:%.*]] = alloca i64, align 8
9410 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
9411 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
9412 // CHECK9-NEXT: [[B_ADDR:%.*]] = alloca i32*, align 8
9413 // CHECK9-NEXT: [[C_ADDR:%.*]] = alloca i32*, align 8
9414 // CHECK9-NEXT: store i64 [[CH]], i64* [[CH_ADDR]], align 8
9415 // CHECK9-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
9416 // CHECK9-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
9417 // CHECK9-NEXT: store i32* [[B]], i32** [[B_ADDR]], align 8
9418 // CHECK9-NEXT: store i32* [[C]], i32** [[C_ADDR]], align 8
9419 // CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
9420 // CHECK9-NEXT: [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
9421 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..50 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
9422 // CHECK9-NEXT: ret void
9423 //
9424 //
9425 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..50
9426 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
9427 // CHECK9-NEXT: entry:
9428 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
9429 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
9430 // CHECK9-NEXT: [[CH_ADDR:%.*]] = alloca i32*, align 8
9431 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
9432 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i32**, align 8
9433 // CHECK9-NEXT: [[B_ADDR:%.*]] = alloca i32**, align 8
9434 // CHECK9-NEXT: [[C_ADDR:%.*]] = alloca i32**, align 8
9435 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
9436 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
9437 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
9438 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
9439 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
9440 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
9441 // CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
9442 // CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
9443 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
9444 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
9445 // CHECK9-NEXT: [[I4:%.*]] = alloca i32, align 4
9446 // CHECK9-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
9447 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
9448 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
9449 // CHECK9-NEXT: store i32* [[CH]], i32** [[CH_ADDR]], align 8
9450 // CHECK9-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
9451 // CHECK9-NEXT: store i32** [[A]], i32*** [[A_ADDR]], align 8
9452 // CHECK9-NEXT: store i32** [[B]], i32*** [[B_ADDR]], align 8
9453 // CHECK9-NEXT: store i32** [[C]], i32*** [[C_ADDR]], align 8
9454 // CHECK9-NEXT: [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
9455 // CHECK9-NEXT: [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
9456 // CHECK9-NEXT: [[TMP2:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
9457 // CHECK9-NEXT: [[TMP3:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
9458 // CHECK9-NEXT: [[TMP4:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
9459 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
9460 // CHECK9-NEXT: store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
9461 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
9462 // CHECK9-NEXT: store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
9463 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9464 // CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
9465 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
9466 // CHECK9-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
9467 // CHECK9-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
9468 // CHECK9-NEXT: store i32 0, i32* [[I]], align 4
9469 // CHECK9-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9470 // CHECK9-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
9471 // CHECK9-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
9472 // CHECK9: omp.precond.then:
9473 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
9474 // CHECK9-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
9475 // CHECK9-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
9476 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
9477 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
9478 // CHECK9-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9479 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
9480 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
9481 // CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
9482 // CHECK9-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
9483 // CHECK9-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
9484 // CHECK9-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
9485 // CHECK9: cond.true:
9486 // CHECK9-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
9487 // CHECK9-NEXT: br label [[COND_END:%.*]]
9488 // CHECK9: cond.false:
9489 // CHECK9-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
9490 // CHECK9-NEXT: br label [[COND_END]]
9491 // CHECK9: cond.end:
9492 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
9493 // CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
9494 // CHECK9-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
9495 // CHECK9-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
9496 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
9497 // CHECK9: omp.inner.for.cond:
9498 // CHECK9-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !98
9499 // CHECK9-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !98
9500 // CHECK9-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
9501 // CHECK9-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
9502 // CHECK9: omp.inner.for.body:
9503 // CHECK9-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !98
9504 // CHECK9-NEXT: [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
9505 // CHECK9-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !98
9506 // CHECK9-NEXT: [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
9507 // CHECK9-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !98
9508 // CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
9509 // CHECK9-NEXT: store i32 [[TMP23]], i32* [[CONV]], align 4, !llvm.access.group !98
9510 // CHECK9-NEXT: [[TMP24:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !98
9511 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i32**, i32**, i32**, i64)* @.omp_outlined..51 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], i32** [[TMP2]], i32** [[TMP3]], i32** [[TMP4]], i64 [[TMP24]]), !llvm.access.group !98
9512 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
9513 // CHECK9: omp.inner.for.inc:
9514 // CHECK9-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !98
9515 // CHECK9-NEXT: [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !98
9516 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
9517 // CHECK9-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !98
9518 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP99:![0-9]+]]
9519 // CHECK9: omp.inner.for.end:
9520 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
9521 // CHECK9: omp.loop.exit:
9522 // CHECK9-NEXT: [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9523 // CHECK9-NEXT: [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
9524 // CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP28]])
9525 // CHECK9-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
9526 // CHECK9-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
9527 // CHECK9-NEXT: br i1 [[TMP30]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
9528 // CHECK9: .omp.final.then:
9529 // CHECK9-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9530 // CHECK9-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP31]], 0
9531 // CHECK9-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
9532 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
9533 // CHECK9-NEXT: [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
9534 // CHECK9-NEXT: store i32 [[ADD9]], i32* [[I4]], align 4
9535 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
9536 // CHECK9: .omp.final.done:
9537 // CHECK9-NEXT: br label [[OMP_PRECOND_END]]
9538 // CHECK9: omp.precond.end:
9539 // CHECK9-NEXT: ret void
9540 //
9541 //
9542 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..51
9543 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[C:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
9544 // CHECK9-NEXT: entry:
9545 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
9546 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
9547 // CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
9548 // CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
9549 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
9550 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i32**, align 8
9551 // CHECK9-NEXT: [[B_ADDR:%.*]] = alloca i32**, align 8
9552 // CHECK9-NEXT: [[C_ADDR:%.*]] = alloca i32**, align 8
9553 // CHECK9-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
9554 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
9555 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
9556 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
9557 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
9558 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
9559 // CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
9560 // CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
9561 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
9562 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
9563 // CHECK9-NEXT: [[I6:%.*]] = alloca i32, align 4
9564 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
9565 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
9566 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
9567 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
9568 // CHECK9-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
9569 // CHECK9-NEXT: store i32** [[A]], i32*** [[A_ADDR]], align 8
9570 // CHECK9-NEXT: store i32** [[B]], i32*** [[B_ADDR]], align 8
9571 // CHECK9-NEXT: store i32** [[C]], i32*** [[C_ADDR]], align 8
9572 // CHECK9-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
9573 // CHECK9-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
9574 // CHECK9-NEXT: [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
9575 // CHECK9-NEXT: [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
9576 // CHECK9-NEXT: [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
9577 // CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
9578 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
9579 // CHECK9-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
9580 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9581 // CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
9582 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
9583 // CHECK9-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
9584 // CHECK9-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
9585 // CHECK9-NEXT: store i32 0, i32* [[I]], align 4
9586 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9587 // CHECK9-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
9588 // CHECK9-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
9589 // CHECK9: omp.precond.then:
9590 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
9591 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
9592 // CHECK9-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
9593 // CHECK9-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
9594 // CHECK9-NEXT: [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
9595 // CHECK9-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
9596 // CHECK9-NEXT: [[CONV5:%.*]] = trunc i64 [[TMP9]] to i32
9597 // CHECK9-NEXT: store i32 [[CONV4]], i32* [[DOTOMP_LB]], align 4
9598 // CHECK9-NEXT: store i32 [[CONV5]], i32* [[DOTOMP_UB]], align 4
9599 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
9600 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
9601 // CHECK9-NEXT: [[TMP10:%.*]] = load i32, i32* [[CONV]], align 4
9602 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
9603 // CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
9604 // CHECK9-NEXT: [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9605 // CHECK9-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
9606 // CHECK9-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32 35, i32 [[TMP11]], i32 [[TMP12]], i32 1, i32 [[TMP10]])
9607 // CHECK9-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
9608 // CHECK9: omp.dispatch.cond:
9609 // CHECK9-NEXT: [[TMP15:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9610 // CHECK9-NEXT: [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4
9611 // CHECK9-NEXT: [[TMP17:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP16]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
9612 // CHECK9-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP17]], 0
9613 // CHECK9-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
9614 // CHECK9: omp.dispatch.body:
9615 // CHECK9-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
9616 // CHECK9-NEXT: store i32 [[TMP18]], i32* [[DOTOMP_IV]], align 4
9617 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
9618 // CHECK9: omp.inner.for.cond:
9619 // CHECK9-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !101
9620 // CHECK9-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !101
9621 // CHECK9-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
9622 // CHECK9-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
9623 // CHECK9: omp.inner.for.body:
9624 // CHECK9-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !101
9625 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP21]], 1
9626 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
9627 // CHECK9-NEXT: store i32 [[ADD]], i32* [[I6]], align 4, !llvm.access.group !101
9628 // CHECK9-NEXT: [[TMP22:%.*]] = load i32*, i32** [[TMP2]], align 8, !llvm.access.group !101
9629 // CHECK9-NEXT: [[TMP23:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !101
9630 // CHECK9-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP23]] to i64
9631 // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP22]], i64 [[IDXPROM]]
9632 // CHECK9-NEXT: [[TMP24:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !101
9633 // CHECK9-NEXT: [[TMP25:%.*]] = load i32*, i32** [[TMP3]], align 8, !llvm.access.group !101
9634 // CHECK9-NEXT: [[TMP26:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !101
9635 // CHECK9-NEXT: [[IDXPROM8:%.*]] = sext i32 [[TMP26]] to i64
9636 // CHECK9-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, i32* [[TMP25]], i64 [[IDXPROM8]]
9637 // CHECK9-NEXT: [[TMP27:%.*]] = load i32, i32* [[ARRAYIDX9]], align 4, !llvm.access.group !101
9638 // CHECK9-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP24]], [[TMP27]]
9639 // CHECK9-NEXT: [[TMP28:%.*]] = load i32*, i32** [[TMP1]], align 8, !llvm.access.group !101
9640 // CHECK9-NEXT: [[TMP29:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !101
9641 // CHECK9-NEXT: [[IDXPROM11:%.*]] = sext i32 [[TMP29]] to i64
9642 // CHECK9-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds i32, i32* [[TMP28]], i64 [[IDXPROM11]]
9643 // CHECK9-NEXT: store i32 [[ADD10]], i32* [[ARRAYIDX12]], align 4, !llvm.access.group !101
9644 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
9645 // CHECK9: omp.body.continue:
9646 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
9647 // CHECK9: omp.inner.for.inc:
9648 // CHECK9-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !101
9649 // CHECK9-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP30]], 1
9650 // CHECK9-NEXT: store i32 [[ADD13]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !101
9651 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP102:![0-9]+]]
9652 // CHECK9: omp.inner.for.end:
9653 // CHECK9-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
9654 // CHECK9: omp.dispatch.inc:
9655 // CHECK9-NEXT: br label [[OMP_DISPATCH_COND]]
9656 // CHECK9: omp.dispatch.end:
9657 // CHECK9-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
9658 // CHECK9-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
9659 // CHECK9-NEXT: br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
9660 // CHECK9: .omp.final.then:
9661 // CHECK9-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9662 // CHECK9-NEXT: [[SUB14:%.*]] = sub nsw i32 [[TMP33]], 0
9663 // CHECK9-NEXT: [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
9664 // CHECK9-NEXT: [[MUL16:%.*]] = mul nsw i32 [[DIV15]], 1
9665 // CHECK9-NEXT: [[ADD17:%.*]] = add nsw i32 0, [[MUL16]]
9666 // CHECK9-NEXT: store i32 [[ADD17]], i32* [[I6]], align 4
9667 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
9668 // CHECK9: .omp.final.done:
9669 // CHECK9-NEXT: br label [[OMP_PRECOND_END]]
9670 // CHECK9: omp.precond.end:
9671 // CHECK9-NEXT: ret void
9672 //
9673 //
9674 // CHECK9-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
9675 // CHECK9-SAME: () #[[ATTR4:[0-9]+]] {
9676 // CHECK9-NEXT: entry:
9677 // CHECK9-NEXT: call void @__tgt_register_requires(i64 1)
9678 // CHECK9-NEXT: ret void
9679 //
9680 //
9681 // CHECK11-LABEL: define {{[^@]+}}@main
9682 // CHECK11-SAME: () #[[ATTR0:[0-9]+]] {
9683 // CHECK11-NEXT: entry:
9684 // CHECK11-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
9685 // CHECK11-NEXT: [[A:%.*]] = alloca double*, align 4
9686 // CHECK11-NEXT: [[B:%.*]] = alloca double*, align 4
9687 // CHECK11-NEXT: [[C:%.*]] = alloca double*, align 4
9688 // CHECK11-NEXT: [[N:%.*]] = alloca i32, align 4
9689 // CHECK11-NEXT: [[CH:%.*]] = alloca i32, align 4
9690 // CHECK11-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
9691 // CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 4
9692 // CHECK11-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 4
9693 // CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 4
9694 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
9695 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
9696 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
9697 // CHECK11-NEXT: [[N_CASTED3:%.*]] = alloca i32, align 4
9698 // CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [4 x i8*], align 4
9699 // CHECK11-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [4 x i8*], align 4
9700 // CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [4 x i8*], align 4
9701 // CHECK11-NEXT: [[_TMP7:%.*]] = alloca i32, align 4
9702 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_8:%.*]] = alloca i32, align 4
9703 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4
9704 // CHECK11-NEXT: [[CH_CASTED:%.*]] = alloca i32, align 4
9705 // CHECK11-NEXT: [[N_CASTED17:%.*]] = alloca i32, align 4
9706 // CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS18:%.*]] = alloca [5 x i8*], align 4
9707 // CHECK11-NEXT: [[DOTOFFLOAD_PTRS19:%.*]] = alloca [5 x i8*], align 4
9708 // CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS20:%.*]] = alloca [5 x i8*], align 4
9709 // CHECK11-NEXT: [[_TMP21:%.*]] = alloca i32, align 4
9710 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_22:%.*]] = alloca i32, align 4
9711 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_23:%.*]] = alloca i32, align 4
9712 // CHECK11-NEXT: [[N_CASTED31:%.*]] = alloca i32, align 4
9713 // CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS32:%.*]] = alloca [4 x i8*], align 4
9714 // CHECK11-NEXT: [[DOTOFFLOAD_PTRS33:%.*]] = alloca [4 x i8*], align 4
9715 // CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS34:%.*]] = alloca [4 x i8*], align 4
9716 // CHECK11-NEXT: [[_TMP35:%.*]] = alloca i32, align 4
9717 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_36:%.*]] = alloca i32, align 4
9718 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_37:%.*]] = alloca i32, align 4
9719 // CHECK11-NEXT: [[CH_CASTED45:%.*]] = alloca i32, align 4
9720 // CHECK11-NEXT: [[N_CASTED46:%.*]] = alloca i32, align 4
9721 // CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS47:%.*]] = alloca [5 x i8*], align 4
9722 // CHECK11-NEXT: [[DOTOFFLOAD_PTRS48:%.*]] = alloca [5 x i8*], align 4
9723 // CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS49:%.*]] = alloca [5 x i8*], align 4
9724 // CHECK11-NEXT: [[_TMP50:%.*]] = alloca i32, align 4
9725 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_51:%.*]] = alloca i32, align 4
9726 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_52:%.*]] = alloca i32, align 4
9727 // CHECK11-NEXT: [[N_CASTED60:%.*]] = alloca i32, align 4
9728 // CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS61:%.*]] = alloca [4 x i8*], align 4
9729 // CHECK11-NEXT: [[DOTOFFLOAD_PTRS62:%.*]] = alloca [4 x i8*], align 4
9730 // CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS63:%.*]] = alloca [4 x i8*], align 4
9731 // CHECK11-NEXT: [[_TMP64:%.*]] = alloca i32, align 4
9732 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_65:%.*]] = alloca i32, align 4
9733 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_66:%.*]] = alloca i32, align 4
9734 // CHECK11-NEXT: [[CH_CASTED74:%.*]] = alloca i32, align 4
9735 // CHECK11-NEXT: [[N_CASTED75:%.*]] = alloca i32, align 4
9736 // CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS76:%.*]] = alloca [5 x i8*], align 4
9737 // CHECK11-NEXT: [[DOTOFFLOAD_PTRS77:%.*]] = alloca [5 x i8*], align 4
9738 // CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS78:%.*]] = alloca [5 x i8*], align 4
9739 // CHECK11-NEXT: [[_TMP79:%.*]] = alloca i32, align 4
9740 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_80:%.*]] = alloca i32, align 4
9741 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_81:%.*]] = alloca i32, align 4
9742 // CHECK11-NEXT: store i32 0, i32* [[RETVAL]], align 4
9743 // CHECK11-NEXT: store i32 10000, i32* [[N]], align 4
9744 // CHECK11-NEXT: store i32 100, i32* [[CH]], align 4
9745 // CHECK11-NEXT: [[TMP0:%.*]] = load i32, i32* [[N]], align 4
9746 // CHECK11-NEXT: store i32 [[TMP0]], i32* [[N_CASTED]], align 4
9747 // CHECK11-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_CASTED]], align 4
9748 // CHECK11-NEXT: [[TMP2:%.*]] = load double*, double** [[A]], align 4
9749 // CHECK11-NEXT: [[TMP3:%.*]] = load double*, double** [[B]], align 4
9750 // CHECK11-NEXT: [[TMP4:%.*]] = load double*, double** [[C]], align 4
9751 // CHECK11-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
9752 // CHECK11-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32*
9753 // CHECK11-NEXT: store i32 [[TMP1]], i32* [[TMP6]], align 4
9754 // CHECK11-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
9755 // CHECK11-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32*
9756 // CHECK11-NEXT: store i32 [[TMP1]], i32* [[TMP8]], align 4
9757 // CHECK11-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
9758 // CHECK11-NEXT: store i8* null, i8** [[TMP9]], align 4
9759 // CHECK11-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
9760 // CHECK11-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to double**
9761 // CHECK11-NEXT: store double* [[TMP2]], double** [[TMP11]], align 4
9762 // CHECK11-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
9763 // CHECK11-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double**
9764 // CHECK11-NEXT: store double* [[TMP2]], double** [[TMP13]], align 4
9765 // CHECK11-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
9766 // CHECK11-NEXT: store i8* null, i8** [[TMP14]], align 4
9767 // CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
9768 // CHECK11-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to double**
9769 // CHECK11-NEXT: store double* [[TMP3]], double** [[TMP16]], align 4
9770 // CHECK11-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
9771 // CHECK11-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to double**
9772 // CHECK11-NEXT: store double* [[TMP3]], double** [[TMP18]], align 4
9773 // CHECK11-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
9774 // CHECK11-NEXT: store i8* null, i8** [[TMP19]], align 4
9775 // CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
9776 // CHECK11-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to double**
9777 // CHECK11-NEXT: store double* [[TMP4]], double** [[TMP21]], align 4
9778 // CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
9779 // CHECK11-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to double**
9780 // CHECK11-NEXT: store double* [[TMP4]], double** [[TMP23]], align 4
9781 // CHECK11-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
9782 // CHECK11-NEXT: store i8* null, i8** [[TMP24]], align 4
9783 // CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
9784 // CHECK11-NEXT: [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
9785 // CHECK11-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4
9786 // CHECK11-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4
9787 // CHECK11-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9788 // CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0
9789 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
9790 // CHECK11-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
9791 // CHECK11-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
9792 // CHECK11-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9793 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1
9794 // CHECK11-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64
9795 // CHECK11-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
9796 // CHECK11-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
9797 // CHECK11-NEXT: store i32 1, i32* [[TMP31]], align 4
9798 // CHECK11-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
9799 // CHECK11-NEXT: store i32 4, i32* [[TMP32]], align 4
9800 // CHECK11-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
9801 // CHECK11-NEXT: store i8** [[TMP25]], i8*** [[TMP33]], align 4
9802 // CHECK11-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
9803 // CHECK11-NEXT: store i8** [[TMP26]], i8*** [[TMP34]], align 4
9804 // CHECK11-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
9805 // CHECK11-NEXT: store i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes, i32 0, i32 0), i64** [[TMP35]], align 4
9806 // CHECK11-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
9807 // CHECK11-NEXT: store i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes, i32 0, i32 0), i64** [[TMP36]], align 4
9808 // CHECK11-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
9809 // CHECK11-NEXT: store i8** null, i8*** [[TMP37]], align 4
9810 // CHECK11-NEXT: [[TMP38:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
9811 // CHECK11-NEXT: store i8** null, i8*** [[TMP38]], align 4
9812 // CHECK11-NEXT: [[TMP39:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
9813 // CHECK11-NEXT: store i64 [[TMP30]], i64* [[TMP39]], align 8
9814 // CHECK11-NEXT: [[TMP40:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l368.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
9815 // CHECK11-NEXT: [[TMP41:%.*]] = icmp ne i32 [[TMP40]], 0
9816 // CHECK11-NEXT: br i1 [[TMP41]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
9817 // CHECK11: omp_offload.failed:
9818 // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l368(i32 [[TMP1]], double* [[TMP2]], double* [[TMP3]], double* [[TMP4]]) #[[ATTR2:[0-9]+]]
9819 // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]]
9820 // CHECK11: omp_offload.cont:
9821 // CHECK11-NEXT: [[TMP42:%.*]] = load i32, i32* [[N]], align 4
9822 // CHECK11-NEXT: store i32 [[TMP42]], i32* [[N_CASTED3]], align 4
9823 // CHECK11-NEXT: [[TMP43:%.*]] = load i32, i32* [[N_CASTED3]], align 4
9824 // CHECK11-NEXT: [[TMP44:%.*]] = load double*, double** [[A]], align 4
9825 // CHECK11-NEXT: [[TMP45:%.*]] = load double*, double** [[B]], align 4
9826 // CHECK11-NEXT: [[TMP46:%.*]] = load double*, double** [[C]], align 4
9827 // CHECK11-NEXT: [[TMP47:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
9828 // CHECK11-NEXT: [[TMP48:%.*]] = bitcast i8** [[TMP47]] to i32*
9829 // CHECK11-NEXT: store i32 [[TMP43]], i32* [[TMP48]], align 4
9830 // CHECK11-NEXT: [[TMP49:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
9831 // CHECK11-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i32*
9832 // CHECK11-NEXT: store i32 [[TMP43]], i32* [[TMP50]], align 4
9833 // CHECK11-NEXT: [[TMP51:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0
9834 // CHECK11-NEXT: store i8* null, i8** [[TMP51]], align 4
9835 // CHECK11-NEXT: [[TMP52:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1
9836 // CHECK11-NEXT: [[TMP53:%.*]] = bitcast i8** [[TMP52]] to double**
9837 // CHECK11-NEXT: store double* [[TMP44]], double** [[TMP53]], align 4
9838 // CHECK11-NEXT: [[TMP54:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1
9839 // CHECK11-NEXT: [[TMP55:%.*]] = bitcast i8** [[TMP54]] to double**
9840 // CHECK11-NEXT: store double* [[TMP44]], double** [[TMP55]], align 4
9841 // CHECK11-NEXT: [[TMP56:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1
9842 // CHECK11-NEXT: store i8* null, i8** [[TMP56]], align 4
9843 // CHECK11-NEXT: [[TMP57:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2
9844 // CHECK11-NEXT: [[TMP58:%.*]] = bitcast i8** [[TMP57]] to double**
9845 // CHECK11-NEXT: store double* [[TMP45]], double** [[TMP58]], align 4
9846 // CHECK11-NEXT: [[TMP59:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2
9847 // CHECK11-NEXT: [[TMP60:%.*]] = bitcast i8** [[TMP59]] to double**
9848 // CHECK11-NEXT: store double* [[TMP45]], double** [[TMP60]], align 4
9849 // CHECK11-NEXT: [[TMP61:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2
9850 // CHECK11-NEXT: store i8* null, i8** [[TMP61]], align 4
9851 // CHECK11-NEXT: [[TMP62:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 3
9852 // CHECK11-NEXT: [[TMP63:%.*]] = bitcast i8** [[TMP62]] to double**
9853 // CHECK11-NEXT: store double* [[TMP46]], double** [[TMP63]], align 4
9854 // CHECK11-NEXT: [[TMP64:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 3
9855 // CHECK11-NEXT: [[TMP65:%.*]] = bitcast i8** [[TMP64]] to double**
9856 // CHECK11-NEXT: store double* [[TMP46]], double** [[TMP65]], align 4
9857 // CHECK11-NEXT: [[TMP66:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 3
9858 // CHECK11-NEXT: store i8* null, i8** [[TMP66]], align 4
9859 // CHECK11-NEXT: [[TMP67:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
9860 // CHECK11-NEXT: [[TMP68:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
9861 // CHECK11-NEXT: [[TMP69:%.*]] = load i32, i32* [[N]], align 4
9862 // CHECK11-NEXT: store i32 [[TMP69]], i32* [[DOTCAPTURE_EXPR_8]], align 4
9863 // CHECK11-NEXT: [[TMP70:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_8]], align 4
9864 // CHECK11-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP70]], 0
9865 // CHECK11-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
9866 // CHECK11-NEXT: [[SUB12:%.*]] = sub nsw i32 [[DIV11]], 1
9867 // CHECK11-NEXT: store i32 [[SUB12]], i32* [[DOTCAPTURE_EXPR_9]], align 4
9868 // CHECK11-NEXT: [[TMP71:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4
9869 // CHECK11-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP71]], 1
9870 // CHECK11-NEXT: [[TMP72:%.*]] = zext i32 [[ADD13]] to i64
9871 // CHECK11-NEXT: [[KERNEL_ARGS14:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
9872 // CHECK11-NEXT: [[TMP73:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS14]], i32 0, i32 0
9873 // CHECK11-NEXT: store i32 1, i32* [[TMP73]], align 4
9874 // CHECK11-NEXT: [[TMP74:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS14]], i32 0, i32 1
9875 // CHECK11-NEXT: store i32 4, i32* [[TMP74]], align 4
9876 // CHECK11-NEXT: [[TMP75:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS14]], i32 0, i32 2
9877 // CHECK11-NEXT: store i8** [[TMP67]], i8*** [[TMP75]], align 4
9878 // CHECK11-NEXT: [[TMP76:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS14]], i32 0, i32 3
9879 // CHECK11-NEXT: store i8** [[TMP68]], i8*** [[TMP76]], align 4
9880 // CHECK11-NEXT: [[TMP77:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS14]], i32 0, i32 4
9881 // CHECK11-NEXT: store i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.4, i32 0, i32 0), i64** [[TMP77]], align 4
9882 // CHECK11-NEXT: [[TMP78:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS14]], i32 0, i32 5
9883 // CHECK11-NEXT: store i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.5, i32 0, i32 0), i64** [[TMP78]], align 4
9884 // CHECK11-NEXT: [[TMP79:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS14]], i32 0, i32 6
9885 // CHECK11-NEXT: store i8** null, i8*** [[TMP79]], align 4
9886 // CHECK11-NEXT: [[TMP80:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS14]], i32 0, i32 7
9887 // CHECK11-NEXT: store i8** null, i8*** [[TMP80]], align 4
9888 // CHECK11-NEXT: [[TMP81:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS14]], i32 0, i32 8
9889 // CHECK11-NEXT: store i64 [[TMP72]], i64* [[TMP81]], align 8
9890 // CHECK11-NEXT: [[TMP82:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB3]], i64 -1, i32 0, i32 1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l407.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS14]])
9891 // CHECK11-NEXT: [[TMP83:%.*]] = icmp ne i32 [[TMP82]], 0
9892 // CHECK11-NEXT: br i1 [[TMP83]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]]
9893 // CHECK11: omp_offload.failed15:
9894 // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l407(i32 [[TMP43]], double* [[TMP44]], double* [[TMP45]], double* [[TMP46]]) #[[ATTR2]]
9895 // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT16]]
9896 // CHECK11: omp_offload.cont16:
9897 // CHECK11-NEXT: [[TMP84:%.*]] = load i32, i32* [[CH]], align 4
9898 // CHECK11-NEXT: store i32 [[TMP84]], i32* [[CH_CASTED]], align 4
9899 // CHECK11-NEXT: [[TMP85:%.*]] = load i32, i32* [[CH_CASTED]], align 4
9900 // CHECK11-NEXT: [[TMP86:%.*]] = load i32, i32* [[N]], align 4
9901 // CHECK11-NEXT: store i32 [[TMP86]], i32* [[N_CASTED17]], align 4
9902 // CHECK11-NEXT: [[TMP87:%.*]] = load i32, i32* [[N_CASTED17]], align 4
9903 // CHECK11-NEXT: [[TMP88:%.*]] = load double*, double** [[A]], align 4
9904 // CHECK11-NEXT: [[TMP89:%.*]] = load double*, double** [[B]], align 4
9905 // CHECK11-NEXT: [[TMP90:%.*]] = load double*, double** [[C]], align 4
9906 // CHECK11-NEXT: [[TMP91:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0
9907 // CHECK11-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to i32*
9908 // CHECK11-NEXT: store i32 [[TMP85]], i32* [[TMP92]], align 4
9909 // CHECK11-NEXT: [[TMP93:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0
9910 // CHECK11-NEXT: [[TMP94:%.*]] = bitcast i8** [[TMP93]] to i32*
9911 // CHECK11-NEXT: store i32 [[TMP85]], i32* [[TMP94]], align 4
9912 // CHECK11-NEXT: [[TMP95:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 0
9913 // CHECK11-NEXT: store i8* null, i8** [[TMP95]], align 4
9914 // CHECK11-NEXT: [[TMP96:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 1
9915 // CHECK11-NEXT: [[TMP97:%.*]] = bitcast i8** [[TMP96]] to i32*
9916 // CHECK11-NEXT: store i32 [[TMP87]], i32* [[TMP97]], align 4
9917 // CHECK11-NEXT: [[TMP98:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 1
9918 // CHECK11-NEXT: [[TMP99:%.*]] = bitcast i8** [[TMP98]] to i32*
9919 // CHECK11-NEXT: store i32 [[TMP87]], i32* [[TMP99]], align 4
9920 // CHECK11-NEXT: [[TMP100:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 1
9921 // CHECK11-NEXT: store i8* null, i8** [[TMP100]], align 4
9922 // CHECK11-NEXT: [[TMP101:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 2
9923 // CHECK11-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to double**
9924 // CHECK11-NEXT: store double* [[TMP88]], double** [[TMP102]], align 4
9925 // CHECK11-NEXT: [[TMP103:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 2
9926 // CHECK11-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to double**
9927 // CHECK11-NEXT: store double* [[TMP88]], double** [[TMP104]], align 4
9928 // CHECK11-NEXT: [[TMP105:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 2
9929 // CHECK11-NEXT: store i8* null, i8** [[TMP105]], align 4
9930 // CHECK11-NEXT: [[TMP106:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 3
9931 // CHECK11-NEXT: [[TMP107:%.*]] = bitcast i8** [[TMP106]] to double**
9932 // CHECK11-NEXT: store double* [[TMP89]], double** [[TMP107]], align 4
9933 // CHECK11-NEXT: [[TMP108:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 3
9934 // CHECK11-NEXT: [[TMP109:%.*]] = bitcast i8** [[TMP108]] to double**
9935 // CHECK11-NEXT: store double* [[TMP89]], double** [[TMP109]], align 4
9936 // CHECK11-NEXT: [[TMP110:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 3
9937 // CHECK11-NEXT: store i8* null, i8** [[TMP110]], align 4
9938 // CHECK11-NEXT: [[TMP111:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 4
9939 // CHECK11-NEXT: [[TMP112:%.*]] = bitcast i8** [[TMP111]] to double**
9940 // CHECK11-NEXT: store double* [[TMP90]], double** [[TMP112]], align 4
9941 // CHECK11-NEXT: [[TMP113:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 4
9942 // CHECK11-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to double**
9943 // CHECK11-NEXT: store double* [[TMP90]], double** [[TMP114]], align 4
9944 // CHECK11-NEXT: [[TMP115:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 4
9945 // CHECK11-NEXT: store i8* null, i8** [[TMP115]], align 4
9946 // CHECK11-NEXT: [[TMP116:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0
9947 // CHECK11-NEXT: [[TMP117:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0
9948 // CHECK11-NEXT: [[TMP118:%.*]] = load i32, i32* [[N]], align 4
9949 // CHECK11-NEXT: store i32 [[TMP118]], i32* [[DOTCAPTURE_EXPR_22]], align 4
9950 // CHECK11-NEXT: [[TMP119:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_22]], align 4
9951 // CHECK11-NEXT: [[SUB24:%.*]] = sub nsw i32 [[TMP119]], 0
9952 // CHECK11-NEXT: [[DIV25:%.*]] = sdiv i32 [[SUB24]], 1
9953 // CHECK11-NEXT: [[SUB26:%.*]] = sub nsw i32 [[DIV25]], 1
9954 // CHECK11-NEXT: store i32 [[SUB26]], i32* [[DOTCAPTURE_EXPR_23]], align 4
9955 // CHECK11-NEXT: [[TMP120:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_23]], align 4
9956 // CHECK11-NEXT: [[ADD27:%.*]] = add nsw i32 [[TMP120]], 1
9957 // CHECK11-NEXT: [[TMP121:%.*]] = zext i32 [[ADD27]] to i64
9958 // CHECK11-NEXT: [[KERNEL_ARGS28:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
9959 // CHECK11-NEXT: [[TMP122:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS28]], i32 0, i32 0
9960 // CHECK11-NEXT: store i32 1, i32* [[TMP122]], align 4
9961 // CHECK11-NEXT: [[TMP123:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS28]], i32 0, i32 1
9962 // CHECK11-NEXT: store i32 5, i32* [[TMP123]], align 4
9963 // CHECK11-NEXT: [[TMP124:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS28]], i32 0, i32 2
9964 // CHECK11-NEXT: store i8** [[TMP116]], i8*** [[TMP124]], align 4
9965 // CHECK11-NEXT: [[TMP125:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS28]], i32 0, i32 3
9966 // CHECK11-NEXT: store i8** [[TMP117]], i8*** [[TMP125]], align 4
9967 // CHECK11-NEXT: [[TMP126:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS28]], i32 0, i32 4
9968 // CHECK11-NEXT: store i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.8, i32 0, i32 0), i64** [[TMP126]], align 4
9969 // CHECK11-NEXT: [[TMP127:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS28]], i32 0, i32 5
9970 // CHECK11-NEXT: store i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.9, i32 0, i32 0), i64** [[TMP127]], align 4
9971 // CHECK11-NEXT: [[TMP128:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS28]], i32 0, i32 6
9972 // CHECK11-NEXT: store i8** null, i8*** [[TMP128]], align 4
9973 // CHECK11-NEXT: [[TMP129:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS28]], i32 0, i32 7
9974 // CHECK11-NEXT: store i8** null, i8*** [[TMP129]], align 4
9975 // CHECK11-NEXT: [[TMP130:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS28]], i32 0, i32 8
9976 // CHECK11-NEXT: store i64 [[TMP121]], i64* [[TMP130]], align 8
9977 // CHECK11-NEXT: [[TMP131:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB3]], i64 -1, i32 0, i32 1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l446.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS28]])
9978 // CHECK11-NEXT: [[TMP132:%.*]] = icmp ne i32 [[TMP131]], 0
9979 // CHECK11-NEXT: br i1 [[TMP132]], label [[OMP_OFFLOAD_FAILED29:%.*]], label [[OMP_OFFLOAD_CONT30:%.*]]
9980 // CHECK11: omp_offload.failed29:
9981 // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l446(i32 [[TMP85]], i32 [[TMP87]], double* [[TMP88]], double* [[TMP89]], double* [[TMP90]]) #[[ATTR2]]
9982 // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT30]]
9983 // CHECK11: omp_offload.cont30:
9984 // CHECK11-NEXT: [[TMP133:%.*]] = load i32, i32* [[N]], align 4
9985 // CHECK11-NEXT: store i32 [[TMP133]], i32* [[N_CASTED31]], align 4
9986 // CHECK11-NEXT: [[TMP134:%.*]] = load i32, i32* [[N_CASTED31]], align 4
9987 // CHECK11-NEXT: [[TMP135:%.*]] = load double*, double** [[A]], align 4
9988 // CHECK11-NEXT: [[TMP136:%.*]] = load double*, double** [[B]], align 4
9989 // CHECK11-NEXT: [[TMP137:%.*]] = load double*, double** [[C]], align 4
9990 // CHECK11-NEXT: [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS32]], i32 0, i32 0
9991 // CHECK11-NEXT: [[TMP139:%.*]] = bitcast i8** [[TMP138]] to i32*
9992 // CHECK11-NEXT: store i32 [[TMP134]], i32* [[TMP139]], align 4
9993 // CHECK11-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS33]], i32 0, i32 0
9994 // CHECK11-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to i32*
9995 // CHECK11-NEXT: store i32 [[TMP134]], i32* [[TMP141]], align 4
9996 // CHECK11-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS34]], i32 0, i32 0
9997 // CHECK11-NEXT: store i8* null, i8** [[TMP142]], align 4
9998 // CHECK11-NEXT: [[TMP143:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS32]], i32 0, i32 1
9999 // CHECK11-NEXT: [[TMP144:%.*]] = bitcast i8** [[TMP143]] to double**
10000 // CHECK11-NEXT: store double* [[TMP135]], double** [[TMP144]], align 4
10001 // CHECK11-NEXT: [[TMP145:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS33]], i32 0, i32 1
10002 // CHECK11-NEXT: [[TMP146:%.*]] = bitcast i8** [[TMP145]] to double**
10003 // CHECK11-NEXT: store double* [[TMP135]], double** [[TMP146]], align 4
10004 // CHECK11-NEXT: [[TMP147:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS34]], i32 0, i32 1
10005 // CHECK11-NEXT: store i8* null, i8** [[TMP147]], align 4
10006 // CHECK11-NEXT: [[TMP148:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS32]], i32 0, i32 2
10007 // CHECK11-NEXT: [[TMP149:%.*]] = bitcast i8** [[TMP148]] to double**
10008 // CHECK11-NEXT: store double* [[TMP136]], double** [[TMP149]], align 4
10009 // CHECK11-NEXT: [[TMP150:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS33]], i32 0, i32 2
10010 // CHECK11-NEXT: [[TMP151:%.*]] = bitcast i8** [[TMP150]] to double**
10011 // CHECK11-NEXT: store double* [[TMP136]], double** [[TMP151]], align 4
10012 // CHECK11-NEXT: [[TMP152:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS34]], i32 0, i32 2
10013 // CHECK11-NEXT: store i8* null, i8** [[TMP152]], align 4
10014 // CHECK11-NEXT: [[TMP153:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS32]], i32 0, i32 3
10015 // CHECK11-NEXT: [[TMP154:%.*]] = bitcast i8** [[TMP153]] to double**
10016 // CHECK11-NEXT: store double* [[TMP137]], double** [[TMP154]], align 4
10017 // CHECK11-NEXT: [[TMP155:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS33]], i32 0, i32 3
10018 // CHECK11-NEXT: [[TMP156:%.*]] = bitcast i8** [[TMP155]] to double**
10019 // CHECK11-NEXT: store double* [[TMP137]], double** [[TMP156]], align 4
10020 // CHECK11-NEXT: [[TMP157:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS34]], i32 0, i32 3
10021 // CHECK11-NEXT: store i8* null, i8** [[TMP157]], align 4
10022 // CHECK11-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS32]], i32 0, i32 0
10023 // CHECK11-NEXT: [[TMP159:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS33]], i32 0, i32 0
10024 // CHECK11-NEXT: [[TMP160:%.*]] = load i32, i32* [[N]], align 4
10025 // CHECK11-NEXT: store i32 [[TMP160]], i32* [[DOTCAPTURE_EXPR_36]], align 4
10026 // CHECK11-NEXT: [[TMP161:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_36]], align 4
10027 // CHECK11-NEXT: [[SUB38:%.*]] = sub nsw i32 [[TMP161]], 0
10028 // CHECK11-NEXT: [[DIV39:%.*]] = sdiv i32 [[SUB38]], 1
10029 // CHECK11-NEXT: [[SUB40:%.*]] = sub nsw i32 [[DIV39]], 1
10030 // CHECK11-NEXT: store i32 [[SUB40]], i32* [[DOTCAPTURE_EXPR_37]], align 4
10031 // CHECK11-NEXT: [[TMP162:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_37]], align 4
10032 // CHECK11-NEXT: [[ADD41:%.*]] = add nsw i32 [[TMP162]], 1
10033 // CHECK11-NEXT: [[TMP163:%.*]] = zext i32 [[ADD41]] to i64
10034 // CHECK11-NEXT: [[KERNEL_ARGS42:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
10035 // CHECK11-NEXT: [[TMP164:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS42]], i32 0, i32 0
10036 // CHECK11-NEXT: store i32 1, i32* [[TMP164]], align 4
10037 // CHECK11-NEXT: [[TMP165:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS42]], i32 0, i32 1
10038 // CHECK11-NEXT: store i32 4, i32* [[TMP165]], align 4
10039 // CHECK11-NEXT: [[TMP166:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS42]], i32 0, i32 2
10040 // CHECK11-NEXT: store i8** [[TMP158]], i8*** [[TMP166]], align 4
10041 // CHECK11-NEXT: [[TMP167:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS42]], i32 0, i32 3
10042 // CHECK11-NEXT: store i8** [[TMP159]], i8*** [[TMP167]], align 4
10043 // CHECK11-NEXT: [[TMP168:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS42]], i32 0, i32 4
10044 // CHECK11-NEXT: store i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64** [[TMP168]], align 4
10045 // CHECK11-NEXT: [[TMP169:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS42]], i32 0, i32 5
10046 // CHECK11-NEXT: store i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i64** [[TMP169]], align 4
10047 // CHECK11-NEXT: [[TMP170:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS42]], i32 0, i32 6
10048 // CHECK11-NEXT: store i8** null, i8*** [[TMP170]], align 4
10049 // CHECK11-NEXT: [[TMP171:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS42]], i32 0, i32 7
10050 // CHECK11-NEXT: store i8** null, i8*** [[TMP171]], align 4
10051 // CHECK11-NEXT: [[TMP172:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS42]], i32 0, i32 8
10052 // CHECK11-NEXT: store i64 [[TMP163]], i64* [[TMP172]], align 8
10053 // CHECK11-NEXT: [[TMP173:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB3]], i64 -1, i32 0, i32 1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l477.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS42]])
10054 // CHECK11-NEXT: [[TMP174:%.*]] = icmp ne i32 [[TMP173]], 0
10055 // CHECK11-NEXT: br i1 [[TMP174]], label [[OMP_OFFLOAD_FAILED43:%.*]], label [[OMP_OFFLOAD_CONT44:%.*]]
10056 // CHECK11: omp_offload.failed43:
10057 // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l477(i32 [[TMP134]], double* [[TMP135]], double* [[TMP136]], double* [[TMP137]]) #[[ATTR2]]
10058 // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT44]]
10059 // CHECK11: omp_offload.cont44:
10060 // CHECK11-NEXT: [[TMP175:%.*]] = load i32, i32* [[CH]], align 4
10061 // CHECK11-NEXT: store i32 [[TMP175]], i32* [[CH_CASTED45]], align 4
10062 // CHECK11-NEXT: [[TMP176:%.*]] = load i32, i32* [[CH_CASTED45]], align 4
10063 // CHECK11-NEXT: [[TMP177:%.*]] = load i32, i32* [[N]], align 4
10064 // CHECK11-NEXT: store i32 [[TMP177]], i32* [[N_CASTED46]], align 4
10065 // CHECK11-NEXT: [[TMP178:%.*]] = load i32, i32* [[N_CASTED46]], align 4
10066 // CHECK11-NEXT: [[TMP179:%.*]] = load double*, double** [[A]], align 4
10067 // CHECK11-NEXT: [[TMP180:%.*]] = load double*, double** [[B]], align 4
10068 // CHECK11-NEXT: [[TMP181:%.*]] = load double*, double** [[C]], align 4
10069 // CHECK11-NEXT: [[TMP182:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 0
10070 // CHECK11-NEXT: [[TMP183:%.*]] = bitcast i8** [[TMP182]] to i32*
10071 // CHECK11-NEXT: store i32 [[TMP176]], i32* [[TMP183]], align 4
10072 // CHECK11-NEXT: [[TMP184:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 0
10073 // CHECK11-NEXT: [[TMP185:%.*]] = bitcast i8** [[TMP184]] to i32*
10074 // CHECK11-NEXT: store i32 [[TMP176]], i32* [[TMP185]], align 4
10075 // CHECK11-NEXT: [[TMP186:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS49]], i32 0, i32 0
10076 // CHECK11-NEXT: store i8* null, i8** [[TMP186]], align 4
10077 // CHECK11-NEXT: [[TMP187:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 1
10078 // CHECK11-NEXT: [[TMP188:%.*]] = bitcast i8** [[TMP187]] to i32*
10079 // CHECK11-NEXT: store i32 [[TMP178]], i32* [[TMP188]], align 4
10080 // CHECK11-NEXT: [[TMP189:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 1
10081 // CHECK11-NEXT: [[TMP190:%.*]] = bitcast i8** [[TMP189]] to i32*
10082 // CHECK11-NEXT: store i32 [[TMP178]], i32* [[TMP190]], align 4
10083 // CHECK11-NEXT: [[TMP191:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS49]], i32 0, i32 1
10084 // CHECK11-NEXT: store i8* null, i8** [[TMP191]], align 4
10085 // CHECK11-NEXT: [[TMP192:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 2
10086 // CHECK11-NEXT: [[TMP193:%.*]] = bitcast i8** [[TMP192]] to double**
10087 // CHECK11-NEXT: store double* [[TMP179]], double** [[TMP193]], align 4
10088 // CHECK11-NEXT: [[TMP194:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 2
10089 // CHECK11-NEXT: [[TMP195:%.*]] = bitcast i8** [[TMP194]] to double**
10090 // CHECK11-NEXT: store double* [[TMP179]], double** [[TMP195]], align 4
10091 // CHECK11-NEXT: [[TMP196:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS49]], i32 0, i32 2
10092 // CHECK11-NEXT: store i8* null, i8** [[TMP196]], align 4
10093 // CHECK11-NEXT: [[TMP197:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 3
10094 // CHECK11-NEXT: [[TMP198:%.*]] = bitcast i8** [[TMP197]] to double**
10095 // CHECK11-NEXT: store double* [[TMP180]], double** [[TMP198]], align 4
10096 // CHECK11-NEXT: [[TMP199:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 3
10097 // CHECK11-NEXT: [[TMP200:%.*]] = bitcast i8** [[TMP199]] to double**
10098 // CHECK11-NEXT: store double* [[TMP180]], double** [[TMP200]], align 4
10099 // CHECK11-NEXT: [[TMP201:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS49]], i32 0, i32 3
10100 // CHECK11-NEXT: store i8* null, i8** [[TMP201]], align 4
10101 // CHECK11-NEXT: [[TMP202:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 4
10102 // CHECK11-NEXT: [[TMP203:%.*]] = bitcast i8** [[TMP202]] to double**
10103 // CHECK11-NEXT: store double* [[TMP181]], double** [[TMP203]], align 4
10104 // CHECK11-NEXT: [[TMP204:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 4
10105 // CHECK11-NEXT: [[TMP205:%.*]] = bitcast i8** [[TMP204]] to double**
10106 // CHECK11-NEXT: store double* [[TMP181]], double** [[TMP205]], align 4
10107 // CHECK11-NEXT: [[TMP206:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS49]], i32 0, i32 4
10108 // CHECK11-NEXT: store i8* null, i8** [[TMP206]], align 4
10109 // CHECK11-NEXT: [[TMP207:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 0
10110 // CHECK11-NEXT: [[TMP208:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 0
10111 // CHECK11-NEXT: [[TMP209:%.*]] = load i32, i32* [[N]], align 4
10112 // CHECK11-NEXT: store i32 [[TMP209]], i32* [[DOTCAPTURE_EXPR_51]], align 4
10113 // CHECK11-NEXT: [[TMP210:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_51]], align 4
10114 // CHECK11-NEXT: [[SUB53:%.*]] = sub nsw i32 [[TMP210]], 0
10115 // CHECK11-NEXT: [[DIV54:%.*]] = sdiv i32 [[SUB53]], 1
10116 // CHECK11-NEXT: [[SUB55:%.*]] = sub nsw i32 [[DIV54]], 1
10117 // CHECK11-NEXT: store i32 [[SUB55]], i32* [[DOTCAPTURE_EXPR_52]], align 4
10118 // CHECK11-NEXT: [[TMP211:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_52]], align 4
10119 // CHECK11-NEXT: [[ADD56:%.*]] = add nsw i32 [[TMP211]], 1
10120 // CHECK11-NEXT: [[TMP212:%.*]] = zext i32 [[ADD56]] to i64
10121 // CHECK11-NEXT: [[KERNEL_ARGS57:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
10122 // CHECK11-NEXT: [[TMP213:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS57]], i32 0, i32 0
10123 // CHECK11-NEXT: store i32 1, i32* [[TMP213]], align 4
10124 // CHECK11-NEXT: [[TMP214:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS57]], i32 0, i32 1
10125 // CHECK11-NEXT: store i32 5, i32* [[TMP214]], align 4
10126 // CHECK11-NEXT: [[TMP215:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS57]], i32 0, i32 2
10127 // CHECK11-NEXT: store i8** [[TMP207]], i8*** [[TMP215]], align 4
10128 // CHECK11-NEXT: [[TMP216:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS57]], i32 0, i32 3
10129 // CHECK11-NEXT: store i8** [[TMP208]], i8*** [[TMP216]], align 4
10130 // CHECK11-NEXT: [[TMP217:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS57]], i32 0, i32 4
10131 // CHECK11-NEXT: store i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.16, i32 0, i32 0), i64** [[TMP217]], align 4
10132 // CHECK11-NEXT: [[TMP218:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS57]], i32 0, i32 5
10133 // CHECK11-NEXT: store i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.17, i32 0, i32 0), i64** [[TMP218]], align 4
10134 // CHECK11-NEXT: [[TMP219:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS57]], i32 0, i32 6
10135 // CHECK11-NEXT: store i8** null, i8*** [[TMP219]], align 4
10136 // CHECK11-NEXT: [[TMP220:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS57]], i32 0, i32 7
10137 // CHECK11-NEXT: store i8** null, i8*** [[TMP220]], align 4
10138 // CHECK11-NEXT: [[TMP221:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS57]], i32 0, i32 8
10139 // CHECK11-NEXT: store i64 [[TMP212]], i64* [[TMP221]], align 8
10140 // CHECK11-NEXT: [[TMP222:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB3]], i64 -1, i32 0, i32 1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l505.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS57]])
10141 // CHECK11-NEXT: [[TMP223:%.*]] = icmp ne i32 [[TMP222]], 0
10142 // CHECK11-NEXT: br i1 [[TMP223]], label [[OMP_OFFLOAD_FAILED58:%.*]], label [[OMP_OFFLOAD_CONT59:%.*]]
10143 // CHECK11: omp_offload.failed58:
10144 // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l505(i32 [[TMP176]], i32 [[TMP178]], double* [[TMP179]], double* [[TMP180]], double* [[TMP181]]) #[[ATTR2]]
10145 // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT59]]
10146 // CHECK11: omp_offload.cont59:
10147 // CHECK11-NEXT: [[TMP224:%.*]] = load i32, i32* [[N]], align 4
10148 // CHECK11-NEXT: store i32 [[TMP224]], i32* [[N_CASTED60]], align 4
10149 // CHECK11-NEXT: [[TMP225:%.*]] = load i32, i32* [[N_CASTED60]], align 4
10150 // CHECK11-NEXT: [[TMP226:%.*]] = load double*, double** [[A]], align 4
10151 // CHECK11-NEXT: [[TMP227:%.*]] = load double*, double** [[B]], align 4
10152 // CHECK11-NEXT: [[TMP228:%.*]] = load double*, double** [[C]], align 4
10153 // CHECK11-NEXT: [[TMP229:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS61]], i32 0, i32 0
10154 // CHECK11-NEXT: [[TMP230:%.*]] = bitcast i8** [[TMP229]] to i32*
10155 // CHECK11-NEXT: store i32 [[TMP225]], i32* [[TMP230]], align 4
10156 // CHECK11-NEXT: [[TMP231:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS62]], i32 0, i32 0
10157 // CHECK11-NEXT: [[TMP232:%.*]] = bitcast i8** [[TMP231]] to i32*
10158 // CHECK11-NEXT: store i32 [[TMP225]], i32* [[TMP232]], align 4
10159 // CHECK11-NEXT: [[TMP233:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS63]], i32 0, i32 0
10160 // CHECK11-NEXT: store i8* null, i8** [[TMP233]], align 4
10161 // CHECK11-NEXT: [[TMP234:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS61]], i32 0, i32 1
10162 // CHECK11-NEXT: [[TMP235:%.*]] = bitcast i8** [[TMP234]] to double**
10163 // CHECK11-NEXT: store double* [[TMP226]], double** [[TMP235]], align 4
10164 // CHECK11-NEXT: [[TMP236:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS62]], i32 0, i32 1
10165 // CHECK11-NEXT: [[TMP237:%.*]] = bitcast i8** [[TMP236]] to double**
10166 // CHECK11-NEXT: store double* [[TMP226]], double** [[TMP237]], align 4
10167 // CHECK11-NEXT: [[TMP238:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS63]], i32 0, i32 1
10168 // CHECK11-NEXT: store i8* null, i8** [[TMP238]], align 4
10169 // CHECK11-NEXT: [[TMP239:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS61]], i32 0, i32 2
10170 // CHECK11-NEXT: [[TMP240:%.*]] = bitcast i8** [[TMP239]] to double**
10171 // CHECK11-NEXT: store double* [[TMP227]], double** [[TMP240]], align 4
10172 // CHECK11-NEXT: [[TMP241:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS62]], i32 0, i32 2
10173 // CHECK11-NEXT: [[TMP242:%.*]] = bitcast i8** [[TMP241]] to double**
10174 // CHECK11-NEXT: store double* [[TMP227]], double** [[TMP242]], align 4
10175 // CHECK11-NEXT: [[TMP243:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS63]], i32 0, i32 2
10176 // CHECK11-NEXT: store i8* null, i8** [[TMP243]], align 4
10177 // CHECK11-NEXT: [[TMP244:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS61]], i32 0, i32 3
10178 // CHECK11-NEXT: [[TMP245:%.*]] = bitcast i8** [[TMP244]] to double**
10179 // CHECK11-NEXT: store double* [[TMP228]], double** [[TMP245]], align 4
10180 // CHECK11-NEXT: [[TMP246:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS62]], i32 0, i32 3
10181 // CHECK11-NEXT: [[TMP247:%.*]] = bitcast i8** [[TMP246]] to double**
10182 // CHECK11-NEXT: store double* [[TMP228]], double** [[TMP247]], align 4
10183 // CHECK11-NEXT: [[TMP248:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS63]], i32 0, i32 3
10184 // CHECK11-NEXT: store i8* null, i8** [[TMP248]], align 4
10185 // CHECK11-NEXT: [[TMP249:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS61]], i32 0, i32 0
10186 // CHECK11-NEXT: [[TMP250:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS62]], i32 0, i32 0
10187 // CHECK11-NEXT: [[TMP251:%.*]] = load i32, i32* [[N]], align 4
10188 // CHECK11-NEXT: store i32 [[TMP251]], i32* [[DOTCAPTURE_EXPR_65]], align 4
10189 // CHECK11-NEXT: [[TMP252:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_65]], align 4
10190 // CHECK11-NEXT: [[SUB67:%.*]] = sub nsw i32 [[TMP252]], 0
10191 // CHECK11-NEXT: [[DIV68:%.*]] = sdiv i32 [[SUB67]], 1
10192 // CHECK11-NEXT: [[SUB69:%.*]] = sub nsw i32 [[DIV68]], 1
10193 // CHECK11-NEXT: store i32 [[SUB69]], i32* [[DOTCAPTURE_EXPR_66]], align 4
10194 // CHECK11-NEXT: [[TMP253:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_66]], align 4
10195 // CHECK11-NEXT: [[ADD70:%.*]] = add nsw i32 [[TMP253]], 1
10196 // CHECK11-NEXT: [[TMP254:%.*]] = zext i32 [[ADD70]] to i64
10197 // CHECK11-NEXT: [[KERNEL_ARGS71:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
10198 // CHECK11-NEXT: [[TMP255:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS71]], i32 0, i32 0
10199 // CHECK11-NEXT: store i32 1, i32* [[TMP255]], align 4
10200 // CHECK11-NEXT: [[TMP256:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS71]], i32 0, i32 1
10201 // CHECK11-NEXT: store i32 4, i32* [[TMP256]], align 4
10202 // CHECK11-NEXT: [[TMP257:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS71]], i32 0, i32 2
10203 // CHECK11-NEXT: store i8** [[TMP249]], i8*** [[TMP257]], align 4
10204 // CHECK11-NEXT: [[TMP258:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS71]], i32 0, i32 3
10205 // CHECK11-NEXT: store i8** [[TMP250]], i8*** [[TMP258]], align 4
10206 // CHECK11-NEXT: [[TMP259:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS71]], i32 0, i32 4
10207 // CHECK11-NEXT: store i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.20, i32 0, i32 0), i64** [[TMP259]], align 4
10208 // CHECK11-NEXT: [[TMP260:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS71]], i32 0, i32 5
10209 // CHECK11-NEXT: store i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.21, i32 0, i32 0), i64** [[TMP260]], align 4
10210 // CHECK11-NEXT: [[TMP261:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS71]], i32 0, i32 6
10211 // CHECK11-NEXT: store i8** null, i8*** [[TMP261]], align 4
10212 // CHECK11-NEXT: [[TMP262:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS71]], i32 0, i32 7
10213 // CHECK11-NEXT: store i8** null, i8*** [[TMP262]], align 4
10214 // CHECK11-NEXT: [[TMP263:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS71]], i32 0, i32 8
10215 // CHECK11-NEXT: store i64 [[TMP254]], i64* [[TMP263]], align 8
10216 // CHECK11-NEXT: [[TMP264:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB3]], i64 -1, i32 0, i32 1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l535.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS71]])
10217 // CHECK11-NEXT: [[TMP265:%.*]] = icmp ne i32 [[TMP264]], 0
10218 // CHECK11-NEXT: br i1 [[TMP265]], label [[OMP_OFFLOAD_FAILED72:%.*]], label [[OMP_OFFLOAD_CONT73:%.*]]
10219 // CHECK11: omp_offload.failed72:
10220 // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l535(i32 [[TMP225]], double* [[TMP226]], double* [[TMP227]], double* [[TMP228]]) #[[ATTR2]]
10221 // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT73]]
10222 // CHECK11: omp_offload.cont73:
10223 // CHECK11-NEXT: [[TMP266:%.*]] = load i32, i32* [[CH]], align 4
10224 // CHECK11-NEXT: store i32 [[TMP266]], i32* [[CH_CASTED74]], align 4
10225 // CHECK11-NEXT: [[TMP267:%.*]] = load i32, i32* [[CH_CASTED74]], align 4
10226 // CHECK11-NEXT: [[TMP268:%.*]] = load i32, i32* [[N]], align 4
10227 // CHECK11-NEXT: store i32 [[TMP268]], i32* [[N_CASTED75]], align 4
10228 // CHECK11-NEXT: [[TMP269:%.*]] = load i32, i32* [[N_CASTED75]], align 4
10229 // CHECK11-NEXT: [[TMP270:%.*]] = load double*, double** [[A]], align 4
10230 // CHECK11-NEXT: [[TMP271:%.*]] = load double*, double** [[B]], align 4
10231 // CHECK11-NEXT: [[TMP272:%.*]] = load double*, double** [[C]], align 4
10232 // CHECK11-NEXT: [[TMP273:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS76]], i32 0, i32 0
10233 // CHECK11-NEXT: [[TMP274:%.*]] = bitcast i8** [[TMP273]] to i32*
10234 // CHECK11-NEXT: store i32 [[TMP267]], i32* [[TMP274]], align 4
10235 // CHECK11-NEXT: [[TMP275:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS77]], i32 0, i32 0
10236 // CHECK11-NEXT: [[TMP276:%.*]] = bitcast i8** [[TMP275]] to i32*
10237 // CHECK11-NEXT: store i32 [[TMP267]], i32* [[TMP276]], align 4
10238 // CHECK11-NEXT: [[TMP277:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS78]], i32 0, i32 0
10239 // CHECK11-NEXT: store i8* null, i8** [[TMP277]], align 4
10240 // CHECK11-NEXT: [[TMP278:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS76]], i32 0, i32 1
10241 // CHECK11-NEXT: [[TMP279:%.*]] = bitcast i8** [[TMP278]] to i32*
10242 // CHECK11-NEXT: store i32 [[TMP269]], i32* [[TMP279]], align 4
10243 // CHECK11-NEXT: [[TMP280:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS77]], i32 0, i32 1
10244 // CHECK11-NEXT: [[TMP281:%.*]] = bitcast i8** [[TMP280]] to i32*
10245 // CHECK11-NEXT: store i32 [[TMP269]], i32* [[TMP281]], align 4
10246 // CHECK11-NEXT: [[TMP282:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS78]], i32 0, i32 1
10247 // CHECK11-NEXT: store i8* null, i8** [[TMP282]], align 4
10248 // CHECK11-NEXT: [[TMP283:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS76]], i32 0, i32 2
10249 // CHECK11-NEXT: [[TMP284:%.*]] = bitcast i8** [[TMP283]] to double**
10250 // CHECK11-NEXT: store double* [[TMP270]], double** [[TMP284]], align 4
10251 // CHECK11-NEXT: [[TMP285:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS77]], i32 0, i32 2
10252 // CHECK11-NEXT: [[TMP286:%.*]] = bitcast i8** [[TMP285]] to double**
10253 // CHECK11-NEXT: store double* [[TMP270]], double** [[TMP286]], align 4
10254 // CHECK11-NEXT: [[TMP287:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS78]], i32 0, i32 2
10255 // CHECK11-NEXT: store i8* null, i8** [[TMP287]], align 4
10256 // CHECK11-NEXT: [[TMP288:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS76]], i32 0, i32 3
10257 // CHECK11-NEXT: [[TMP289:%.*]] = bitcast i8** [[TMP288]] to double**
10258 // CHECK11-NEXT: store double* [[TMP271]], double** [[TMP289]], align 4
10259 // CHECK11-NEXT: [[TMP290:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS77]], i32 0, i32 3
10260 // CHECK11-NEXT: [[TMP291:%.*]] = bitcast i8** [[TMP290]] to double**
10261 // CHECK11-NEXT: store double* [[TMP271]], double** [[TMP291]], align 4
10262 // CHECK11-NEXT: [[TMP292:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS78]], i32 0, i32 3
10263 // CHECK11-NEXT: store i8* null, i8** [[TMP292]], align 4
10264 // CHECK11-NEXT: [[TMP293:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS76]], i32 0, i32 4
10265 // CHECK11-NEXT: [[TMP294:%.*]] = bitcast i8** [[TMP293]] to double**
10266 // CHECK11-NEXT: store double* [[TMP272]], double** [[TMP294]], align 4
10267 // CHECK11-NEXT: [[TMP295:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS77]], i32 0, i32 4
10268 // CHECK11-NEXT: [[TMP296:%.*]] = bitcast i8** [[TMP295]] to double**
10269 // CHECK11-NEXT: store double* [[TMP272]], double** [[TMP296]], align 4
10270 // CHECK11-NEXT: [[TMP297:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS78]], i32 0, i32 4
10271 // CHECK11-NEXT: store i8* null, i8** [[TMP297]], align 4
10272 // CHECK11-NEXT: [[TMP298:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS76]], i32 0, i32 0
10273 // CHECK11-NEXT: [[TMP299:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS77]], i32 0, i32 0
10274 // CHECK11-NEXT: [[TMP300:%.*]] = load i32, i32* [[N]], align 4
10275 // CHECK11-NEXT: store i32 [[TMP300]], i32* [[DOTCAPTURE_EXPR_80]], align 4
10276 // CHECK11-NEXT: [[TMP301:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_80]], align 4
10277 // CHECK11-NEXT: [[SUB82:%.*]] = sub nsw i32 [[TMP301]], 0
10278 // CHECK11-NEXT: [[DIV83:%.*]] = sdiv i32 [[SUB82]], 1
10279 // CHECK11-NEXT: [[SUB84:%.*]] = sub nsw i32 [[DIV83]], 1
10280 // CHECK11-NEXT: store i32 [[SUB84]], i32* [[DOTCAPTURE_EXPR_81]], align 4
10281 // CHECK11-NEXT: [[TMP302:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_81]], align 4
10282 // CHECK11-NEXT: [[ADD85:%.*]] = add nsw i32 [[TMP302]], 1
10283 // CHECK11-NEXT: [[TMP303:%.*]] = zext i32 [[ADD85]] to i64
10284 // CHECK11-NEXT: [[KERNEL_ARGS86:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
10285 // CHECK11-NEXT: [[TMP304:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS86]], i32 0, i32 0
10286 // CHECK11-NEXT: store i32 1, i32* [[TMP304]], align 4
10287 // CHECK11-NEXT: [[TMP305:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS86]], i32 0, i32 1
10288 // CHECK11-NEXT: store i32 5, i32* [[TMP305]], align 4
10289 // CHECK11-NEXT: [[TMP306:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS86]], i32 0, i32 2
10290 // CHECK11-NEXT: store i8** [[TMP298]], i8*** [[TMP306]], align 4
10291 // CHECK11-NEXT: [[TMP307:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS86]], i32 0, i32 3
10292 // CHECK11-NEXT: store i8** [[TMP299]], i8*** [[TMP307]], align 4
10293 // CHECK11-NEXT: [[TMP308:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS86]], i32 0, i32 4
10294 // CHECK11-NEXT: store i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.24, i32 0, i32 0), i64** [[TMP308]], align 4
10295 // CHECK11-NEXT: [[TMP309:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS86]], i32 0, i32 5
10296 // CHECK11-NEXT: store i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.25, i32 0, i32 0), i64** [[TMP309]], align 4
10297 // CHECK11-NEXT: [[TMP310:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS86]], i32 0, i32 6
10298 // CHECK11-NEXT: store i8** null, i8*** [[TMP310]], align 4
10299 // CHECK11-NEXT: [[TMP311:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS86]], i32 0, i32 7
10300 // CHECK11-NEXT: store i8** null, i8*** [[TMP311]], align 4
10301 // CHECK11-NEXT: [[TMP312:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS86]], i32 0, i32 8
10302 // CHECK11-NEXT: store i64 [[TMP303]], i64* [[TMP312]], align 8
10303 // CHECK11-NEXT: [[TMP313:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB3]], i64 -1, i32 0, i32 1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l561.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS86]])
10304 // CHECK11-NEXT: [[TMP314:%.*]] = icmp ne i32 [[TMP313]], 0
10305 // CHECK11-NEXT: br i1 [[TMP314]], label [[OMP_OFFLOAD_FAILED87:%.*]], label [[OMP_OFFLOAD_CONT88:%.*]]
10306 // CHECK11: omp_offload.failed87:
10307 // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l561(i32 [[TMP267]], i32 [[TMP269]], double* [[TMP270]], double* [[TMP271]], double* [[TMP272]]) #[[ATTR2]]
10308 // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT88]]
10309 // CHECK11: omp_offload.cont88:
10310 // CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiET_v()
10311 // CHECK11-NEXT: ret i32 [[CALL]]
10312 //
10313 //
10314 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l368
10315 // CHECK11-SAME: (i32 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR1:[0-9]+]] {
10316 // CHECK11-NEXT: entry:
10317 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
10318 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca double*, align 4
10319 // CHECK11-NEXT: [[B_ADDR:%.*]] = alloca double*, align 4
10320 // CHECK11-NEXT: [[C_ADDR:%.*]] = alloca double*, align 4
10321 // CHECK11-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
10322 // CHECK11-NEXT: store double* [[A]], double** [[A_ADDR]], align 4
10323 // CHECK11-NEXT: store double* [[B]], double** [[B_ADDR]], align 4
10324 // CHECK11-NEXT: store double* [[C]], double** [[C_ADDR]], align 4
10325 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
10326 // CHECK11-NEXT: ret void
10327 //
10328 //
10329 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined.
10330 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
10331 // CHECK11-NEXT: entry:
10332 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
10333 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
10334 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 4
10335 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca double**, align 4
10336 // CHECK11-NEXT: [[B_ADDR:%.*]] = alloca double**, align 4
10337 // CHECK11-NEXT: [[C_ADDR:%.*]] = alloca double**, align 4
10338 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
10339 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
10340 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
10341 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
10342 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
10343 // CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
10344 // CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
10345 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10346 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10347 // CHECK11-NEXT: [[I3:%.*]] = alloca i32, align 4
10348 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
10349 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
10350 // CHECK11-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 4
10351 // CHECK11-NEXT: store double** [[A]], double*** [[A_ADDR]], align 4
10352 // CHECK11-NEXT: store double** [[B]], double*** [[B_ADDR]], align 4
10353 // CHECK11-NEXT: store double** [[C]], double*** [[C_ADDR]], align 4
10354 // CHECK11-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
10355 // CHECK11-NEXT: [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
10356 // CHECK11-NEXT: [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
10357 // CHECK11-NEXT: [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
10358 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
10359 // CHECK11-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
10360 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10361 // CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
10362 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
10363 // CHECK11-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
10364 // CHECK11-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
10365 // CHECK11-NEXT: store i32 0, i32* [[I]], align 4
10366 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10367 // CHECK11-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
10368 // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
10369 // CHECK11: omp.precond.then:
10370 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
10371 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10372 // CHECK11-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
10373 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
10374 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
10375 // CHECK11-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
10376 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
10377 // CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
10378 // CHECK11-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10379 // CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10380 // CHECK11-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
10381 // CHECK11-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
10382 // CHECK11: cond.true:
10383 // CHECK11-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10384 // CHECK11-NEXT: br label [[COND_END:%.*]]
10385 // CHECK11: cond.false:
10386 // CHECK11-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10387 // CHECK11-NEXT: br label [[COND_END]]
10388 // CHECK11: cond.end:
10389 // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
10390 // CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
10391 // CHECK11-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
10392 // CHECK11-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
10393 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
10394 // CHECK11: omp.inner.for.cond:
10395 // CHECK11-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
10396 // CHECK11-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !18
10397 // CHECK11-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
10398 // CHECK11-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10399 // CHECK11: omp.inner.for.body:
10400 // CHECK11-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !18
10401 // CHECK11-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !18
10402 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !18
10403 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
10404 // CHECK11: omp.inner.for.inc:
10405 // CHECK11-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
10406 // CHECK11-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !18
10407 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
10408 // CHECK11-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
10409 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
10410 // CHECK11: omp.inner.for.end:
10411 // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
10412 // CHECK11: omp.loop.exit:
10413 // CHECK11-NEXT: [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
10414 // CHECK11-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
10415 // CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
10416 // CHECK11-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
10417 // CHECK11-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
10418 // CHECK11-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10419 // CHECK11: .omp.final.then:
10420 // CHECK11-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10421 // CHECK11-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
10422 // CHECK11-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
10423 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
10424 // CHECK11-NEXT: [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
10425 // CHECK11-NEXT: store i32 [[ADD8]], i32* [[I3]], align 4
10426 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]]
10427 // CHECK11: .omp.final.done:
10428 // CHECK11-NEXT: br label [[OMP_PRECOND_END]]
10429 // CHECK11: omp.precond.end:
10430 // CHECK11-NEXT: ret void
10431 //
10432 //
10433 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..1
10434 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
10435 // CHECK11-NEXT: entry:
10436 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
10437 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
10438 // CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
10439 // CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
10440 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 4
10441 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca double**, align 4
10442 // CHECK11-NEXT: [[B_ADDR:%.*]] = alloca double**, align 4
10443 // CHECK11-NEXT: [[C_ADDR:%.*]] = alloca double**, align 4
10444 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
10445 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
10446 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
10447 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
10448 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
10449 // CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
10450 // CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
10451 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10452 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10453 // CHECK11-NEXT: [[I3:%.*]] = alloca i32, align 4
10454 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
10455 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
10456 // CHECK11-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
10457 // CHECK11-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
10458 // CHECK11-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 4
10459 // CHECK11-NEXT: store double** [[A]], double*** [[A_ADDR]], align 4
10460 // CHECK11-NEXT: store double** [[B]], double*** [[B_ADDR]], align 4
10461 // CHECK11-NEXT: store double** [[C]], double*** [[C_ADDR]], align 4
10462 // CHECK11-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
10463 // CHECK11-NEXT: [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
10464 // CHECK11-NEXT: [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
10465 // CHECK11-NEXT: [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
10466 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
10467 // CHECK11-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
10468 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10469 // CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
10470 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
10471 // CHECK11-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
10472 // CHECK11-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
10473 // CHECK11-NEXT: store i32 0, i32* [[I]], align 4
10474 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10475 // CHECK11-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
10476 // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
10477 // CHECK11: omp.precond.then:
10478 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
10479 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10480 // CHECK11-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
10481 // CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
10482 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
10483 // CHECK11-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
10484 // CHECK11-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
10485 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
10486 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
10487 // CHECK11-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
10488 // CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
10489 // CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
10490 // CHECK11-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
10491 // CHECK11-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10492 // CHECK11-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
10493 // CHECK11-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
10494 // CHECK11: cond.true:
10495 // CHECK11-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10496 // CHECK11-NEXT: br label [[COND_END:%.*]]
10497 // CHECK11: cond.false:
10498 // CHECK11-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
10499 // CHECK11-NEXT: br label [[COND_END]]
10500 // CHECK11: cond.end:
10501 // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
10502 // CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
10503 // CHECK11-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
10504 // CHECK11-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
10505 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
10506 // CHECK11: omp.inner.for.cond:
10507 // CHECK11-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
10508 // CHECK11-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !22
10509 // CHECK11-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
10510 // CHECK11-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10511 // CHECK11: omp.inner.for.body:
10512 // CHECK11-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
10513 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
10514 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
10515 // CHECK11-NEXT: store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !22
10516 // CHECK11-NEXT: [[TMP20:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !22
10517 // CHECK11-NEXT: [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !22
10518 // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i32 [[TMP21]]
10519 // CHECK11-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !22
10520 // CHECK11-NEXT: [[TMP23:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !22
10521 // CHECK11-NEXT: [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !22
10522 // CHECK11-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
10523 // CHECK11-NEXT: [[TMP25:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !22
10524 // CHECK11-NEXT: [[ADD7:%.*]] = fadd double [[TMP22]], [[TMP25]]
10525 // CHECK11-NEXT: [[TMP26:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !22
10526 // CHECK11-NEXT: [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !22
10527 // CHECK11-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
10528 // CHECK11-NEXT: store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !22
10529 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
10530 // CHECK11: omp.body.continue:
10531 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
10532 // CHECK11: omp.inner.for.inc:
10533 // CHECK11-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
10534 // CHECK11-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP28]], 1
10535 // CHECK11-NEXT: store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
10536 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]]
10537 // CHECK11: omp.inner.for.end:
10538 // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
10539 // CHECK11: omp.loop.exit:
10540 // CHECK11-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
10541 // CHECK11-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
10542 // CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
10543 // CHECK11-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
10544 // CHECK11-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
10545 // CHECK11-NEXT: br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10546 // CHECK11: .omp.final.then:
10547 // CHECK11-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10548 // CHECK11-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
10549 // CHECK11-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
10550 // CHECK11-NEXT: [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
10551 // CHECK11-NEXT: [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
10552 // CHECK11-NEXT: store i32 [[ADD13]], i32* [[I3]], align 4
10553 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]]
10554 // CHECK11: .omp.final.done:
10555 // CHECK11-NEXT: br label [[OMP_PRECOND_END]]
10556 // CHECK11: omp.precond.end:
10557 // CHECK11-NEXT: ret void
10558 //
10559 //
10560 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l407
10561 // CHECK11-SAME: (i32 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR1]] {
10562 // CHECK11-NEXT: entry:
10563 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
10564 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca double*, align 4
10565 // CHECK11-NEXT: [[B_ADDR:%.*]] = alloca double*, align 4
10566 // CHECK11-NEXT: [[C_ADDR:%.*]] = alloca double*, align 4
10567 // CHECK11-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
10568 // CHECK11-NEXT: store double* [[A]], double** [[A_ADDR]], align 4
10569 // CHECK11-NEXT: store double* [[B]], double** [[B_ADDR]], align 4
10570 // CHECK11-NEXT: store double* [[C]], double** [[C_ADDR]], align 4
10571 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
10572 // CHECK11-NEXT: ret void
10573 //
10574 //
10575 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..2
10576 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
10577 // CHECK11-NEXT: entry:
10578 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
10579 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
10580 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 4
10581 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca double**, align 4
10582 // CHECK11-NEXT: [[B_ADDR:%.*]] = alloca double**, align 4
10583 // CHECK11-NEXT: [[C_ADDR:%.*]] = alloca double**, align 4
10584 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
10585 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
10586 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
10587 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
10588 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
10589 // CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
10590 // CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
10591 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10592 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10593 // CHECK11-NEXT: [[I3:%.*]] = alloca i32, align 4
10594 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
10595 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
10596 // CHECK11-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 4
10597 // CHECK11-NEXT: store double** [[A]], double*** [[A_ADDR]], align 4
10598 // CHECK11-NEXT: store double** [[B]], double*** [[B_ADDR]], align 4
10599 // CHECK11-NEXT: store double** [[C]], double*** [[C_ADDR]], align 4
10600 // CHECK11-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
10601 // CHECK11-NEXT: [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
10602 // CHECK11-NEXT: [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
10603 // CHECK11-NEXT: [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
10604 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
10605 // CHECK11-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
10606 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10607 // CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
10608 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
10609 // CHECK11-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
10610 // CHECK11-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
10611 // CHECK11-NEXT: store i32 0, i32* [[I]], align 4
10612 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10613 // CHECK11-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
10614 // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
10615 // CHECK11: omp.precond.then:
10616 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
10617 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10618 // CHECK11-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
10619 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
10620 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
10621 // CHECK11-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
10622 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
10623 // CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
10624 // CHECK11-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10625 // CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10626 // CHECK11-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
10627 // CHECK11-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
10628 // CHECK11: cond.true:
10629 // CHECK11-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10630 // CHECK11-NEXT: br label [[COND_END:%.*]]
10631 // CHECK11: cond.false:
10632 // CHECK11-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10633 // CHECK11-NEXT: br label [[COND_END]]
10634 // CHECK11: cond.end:
10635 // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
10636 // CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
10637 // CHECK11-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
10638 // CHECK11-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
10639 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
10640 // CHECK11: omp.inner.for.cond:
10641 // CHECK11-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
10642 // CHECK11-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !27
10643 // CHECK11-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
10644 // CHECK11-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10645 // CHECK11: omp.inner.for.body:
10646 // CHECK11-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !27
10647 // CHECK11-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !27
10648 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !27
10649 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
10650 // CHECK11: omp.inner.for.inc:
10651 // CHECK11-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
10652 // CHECK11-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !27
10653 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
10654 // CHECK11-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
10655 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]]
10656 // CHECK11: omp.inner.for.end:
10657 // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
10658 // CHECK11: omp.loop.exit:
10659 // CHECK11-NEXT: [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
10660 // CHECK11-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
10661 // CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
10662 // CHECK11-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
10663 // CHECK11-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
10664 // CHECK11-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10665 // CHECK11: .omp.final.then:
10666 // CHECK11-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10667 // CHECK11-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
10668 // CHECK11-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
10669 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
10670 // CHECK11-NEXT: [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
10671 // CHECK11-NEXT: store i32 [[ADD8]], i32* [[I3]], align 4
10672 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]]
10673 // CHECK11: .omp.final.done:
10674 // CHECK11-NEXT: br label [[OMP_PRECOND_END]]
10675 // CHECK11: omp.precond.end:
10676 // CHECK11-NEXT: ret void
10677 //
10678 //
10679 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..3
10680 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
10681 // CHECK11-NEXT: entry:
10682 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
10683 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
10684 // CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
10685 // CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
10686 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 4
10687 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca double**, align 4
10688 // CHECK11-NEXT: [[B_ADDR:%.*]] = alloca double**, align 4
10689 // CHECK11-NEXT: [[C_ADDR:%.*]] = alloca double**, align 4
10690 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
10691 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
10692 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
10693 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
10694 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
10695 // CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
10696 // CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
10697 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10698 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10699 // CHECK11-NEXT: [[I3:%.*]] = alloca i32, align 4
10700 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
10701 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
10702 // CHECK11-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
10703 // CHECK11-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
10704 // CHECK11-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 4
10705 // CHECK11-NEXT: store double** [[A]], double*** [[A_ADDR]], align 4
10706 // CHECK11-NEXT: store double** [[B]], double*** [[B_ADDR]], align 4
10707 // CHECK11-NEXT: store double** [[C]], double*** [[C_ADDR]], align 4
10708 // CHECK11-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
10709 // CHECK11-NEXT: [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
10710 // CHECK11-NEXT: [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
10711 // CHECK11-NEXT: [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
10712 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
10713 // CHECK11-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
10714 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10715 // CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
10716 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
10717 // CHECK11-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
10718 // CHECK11-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
10719 // CHECK11-NEXT: store i32 0, i32* [[I]], align 4
10720 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10721 // CHECK11-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
10722 // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
10723 // CHECK11: omp.precond.then:
10724 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
10725 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10726 // CHECK11-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
10727 // CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
10728 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
10729 // CHECK11-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
10730 // CHECK11-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
10731 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
10732 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
10733 // CHECK11-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
10734 // CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
10735 // CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
10736 // CHECK11-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
10737 // CHECK11-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10738 // CHECK11-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
10739 // CHECK11-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
10740 // CHECK11: cond.true:
10741 // CHECK11-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10742 // CHECK11-NEXT: br label [[COND_END:%.*]]
10743 // CHECK11: cond.false:
10744 // CHECK11-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
10745 // CHECK11-NEXT: br label [[COND_END]]
10746 // CHECK11: cond.end:
10747 // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
10748 // CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
10749 // CHECK11-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
10750 // CHECK11-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
10751 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
10752 // CHECK11: omp.inner.for.cond:
10753 // CHECK11-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
10754 // CHECK11-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !30
10755 // CHECK11-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
10756 // CHECK11-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10757 // CHECK11: omp.inner.for.body:
10758 // CHECK11-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
10759 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
10760 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
10761 // CHECK11-NEXT: store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !30
10762 // CHECK11-NEXT: [[TMP20:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !30
10763 // CHECK11-NEXT: [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !30
10764 // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i32 [[TMP21]]
10765 // CHECK11-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !30
10766 // CHECK11-NEXT: [[TMP23:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !30
10767 // CHECK11-NEXT: [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !30
10768 // CHECK11-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
10769 // CHECK11-NEXT: [[TMP25:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !30
10770 // CHECK11-NEXT: [[ADD7:%.*]] = fadd double [[TMP22]], [[TMP25]]
10771 // CHECK11-NEXT: [[TMP26:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !30
10772 // CHECK11-NEXT: [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !30
10773 // CHECK11-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
10774 // CHECK11-NEXT: store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !30
10775 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
10776 // CHECK11: omp.body.continue:
10777 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
10778 // CHECK11: omp.inner.for.inc:
10779 // CHECK11-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
10780 // CHECK11-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP28]], 1
10781 // CHECK11-NEXT: store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
10782 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]]
10783 // CHECK11: omp.inner.for.end:
10784 // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
10785 // CHECK11: omp.loop.exit:
10786 // CHECK11-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
10787 // CHECK11-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
10788 // CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
10789 // CHECK11-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
10790 // CHECK11-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
10791 // CHECK11-NEXT: br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10792 // CHECK11: .omp.final.then:
10793 // CHECK11-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10794 // CHECK11-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
10795 // CHECK11-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
10796 // CHECK11-NEXT: [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
10797 // CHECK11-NEXT: [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
10798 // CHECK11-NEXT: store i32 [[ADD13]], i32* [[I3]], align 4
10799 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]]
10800 // CHECK11: .omp.final.done:
10801 // CHECK11-NEXT: br label [[OMP_PRECOND_END]]
10802 // CHECK11: omp.precond.end:
10803 // CHECK11-NEXT: ret void
10804 //
10805 //
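// Host stub for the target region at main:446: it stores the five captured
// values (ch, n, a, b, c) and forwards them to @.omp_outlined..6 through
// __kmpc_fork_teams.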
10806 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l446
10807 // CHECK11-SAME: (i32 noundef [[CH:%.*]], i32 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR1]] {
10808 // CHECK11-NEXT: entry:
10809 // CHECK11-NEXT: [[CH_ADDR:%.*]] = alloca i32, align 4
10810 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
10811 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca double*, align 4
10812 // CHECK11-NEXT: [[B_ADDR:%.*]] = alloca double*, align 4
10813 // CHECK11-NEXT: [[C_ADDR:%.*]] = alloca double*, align 4
10814 // CHECK11-NEXT: store i32 [[CH]], i32* [[CH_ADDR]], align 4
10815 // CHECK11-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
10816 // CHECK11-NEXT: store double* [[A]], double** [[A_ADDR]], align 4
10817 // CHECK11-NEXT: store double* [[B]], double** [[B_ADDR]], align 4
10818 // CHECK11-NEXT: store double* [[C]], double** [[C_ADDR]], align 4
10819 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
10820 // CHECK11-NEXT: ret void
10821 //
10822 //
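// Teams/distribute outlined function for the l446 region: it computes the trip
// count from n, guards the zero-trip case, runs the distribute loop with
// schedule kind 91 (distribute static, chunked by the captured ch value) and
// forks @.omp_outlined..7 with the per-chunk bounds.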
10823 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..6
10824 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
10825 // CHECK11-NEXT: entry:
10826 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
10827 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
10828 // CHECK11-NEXT: [[CH_ADDR:%.*]] = alloca i32*, align 4
10829 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 4
10830 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca double**, align 4
10831 // CHECK11-NEXT: [[B_ADDR:%.*]] = alloca double**, align 4
10832 // CHECK11-NEXT: [[C_ADDR:%.*]] = alloca double**, align 4
10833 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
10834 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
10835 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
10836 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
10837 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
10838 // CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
10839 // CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
10840 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10841 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10842 // CHECK11-NEXT: [[I3:%.*]] = alloca i32, align 4
10843 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
10844 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
10845 // CHECK11-NEXT: store i32* [[CH]], i32** [[CH_ADDR]], align 4
10846 // CHECK11-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 4
10847 // CHECK11-NEXT: store double** [[A]], double*** [[A_ADDR]], align 4
10848 // CHECK11-NEXT: store double** [[B]], double*** [[B_ADDR]], align 4
10849 // CHECK11-NEXT: store double** [[C]], double*** [[C_ADDR]], align 4
10850 // CHECK11-NEXT: [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
10851 // CHECK11-NEXT: [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
10852 // CHECK11-NEXT: [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 4
10853 // CHECK11-NEXT: [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 4
10854 // CHECK11-NEXT: [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 4
10855 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP1]], align 4
10856 // CHECK11-NEXT: store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
10857 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10858 // CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP6]], 0
10859 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
10860 // CHECK11-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
10861 // CHECK11-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
10862 // CHECK11-NEXT: store i32 0, i32* [[I]], align 4
10863 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10864 // CHECK11-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP7]]
10865 // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
10866 // CHECK11: omp.precond.then:
10867 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
10868 // CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10869 // CHECK11-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_COMB_UB]], align 4
10870 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
10871 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
10872 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP0]], align 4
10873 // CHECK11-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
10874 // CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
10875 // CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP9]])
10876 // CHECK11-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10877 // CHECK11-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10878 // CHECK11-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
10879 // CHECK11-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
10880 // CHECK11: cond.true:
10881 // CHECK11-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10882 // CHECK11-NEXT: br label [[COND_END:%.*]]
10883 // CHECK11: cond.false:
10884 // CHECK11-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10885 // CHECK11-NEXT: br label [[COND_END]]
10886 // CHECK11: cond.end:
10887 // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
10888 // CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
10889 // CHECK11-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
10890 // CHECK11-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
10891 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
10892 // CHECK11: omp.inner.for.cond:
10893 // CHECK11-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
10894 // CHECK11-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !33
10895 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP18]], 1
10896 // CHECK11-NEXT: [[CMP5:%.*]] = icmp slt i32 [[TMP17]], [[ADD]]
10897 // CHECK11-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10898 // CHECK11: omp.inner.for.body:
10899 // CHECK11-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !33
10900 // CHECK11-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !33
10901 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]]), !llvm.access.group !33
10902 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
10903 // CHECK11: omp.inner.for.inc:
10904 // CHECK11-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
10905 // CHECK11-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !33
10906 // CHECK11-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
10907 // CHECK11-NEXT: store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
10908 // CHECK11-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !33
10909 // CHECK11-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !33
10910 // CHECK11-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
10911 // CHECK11-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !33
10912 // CHECK11-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !33
10913 // CHECK11-NEXT: [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !33
10914 // CHECK11-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
10915 // CHECK11-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !33
10916 // CHECK11-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !33
10917 // CHECK11-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !33
10918 // CHECK11-NEXT: [[CMP9:%.*]] = icmp sgt i32 [[TMP27]], [[TMP28]]
10919 // CHECK11-NEXT: br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
10920 // CHECK11: cond.true10:
10921 // CHECK11-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !33
10922 // CHECK11-NEXT: br label [[COND_END12:%.*]]
10923 // CHECK11: cond.false11:
10924 // CHECK11-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !33
10925 // CHECK11-NEXT: br label [[COND_END12]]
10926 // CHECK11: cond.end12:
10927 // CHECK11-NEXT: [[COND13:%.*]] = phi i32 [ [[TMP29]], [[COND_TRUE10]] ], [ [[TMP30]], [[COND_FALSE11]] ]
10928 // CHECK11-NEXT: store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !33
10929 // CHECK11-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !33
10930 // CHECK11-NEXT: store i32 [[TMP31]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
10931 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]]
10932 // CHECK11: omp.inner.for.end:
10933 // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
10934 // CHECK11: omp.loop.exit:
10935 // CHECK11-NEXT: [[TMP32:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
10936 // CHECK11-NEXT: [[TMP33:%.*]] = load i32, i32* [[TMP32]], align 4
10937 // CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP33]])
10938 // CHECK11-NEXT: [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
10939 // CHECK11-NEXT: [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
10940 // CHECK11-NEXT: br i1 [[TMP35]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10941 // CHECK11: .omp.final.then:
10942 // CHECK11-NEXT: [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10943 // CHECK11-NEXT: [[SUB14:%.*]] = sub nsw i32 [[TMP36]], 0
10944 // CHECK11-NEXT: [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
10945 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV15]], 1
10946 // CHECK11-NEXT: [[ADD16:%.*]] = add nsw i32 0, [[MUL]]
10947 // CHECK11-NEXT: store i32 [[ADD16]], i32* [[I3]], align 4
10948 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]]
10949 // CHECK11: .omp.final.done:
10950 // CHECK11-NEXT: br label [[OMP_PRECOND_END]]
10951 // CHECK11: omp.precond.end:
10952 // CHECK11-NEXT: ret void
10953 //
10954 //
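// Parallel worker for the l446 region: the distribute bounds are copied over
// the local lower/upper bounds, the loop is statically scheduled (kind 34) and
// each iteration performs a[i] = b[i] + c[i] on double data.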
10955 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..7
10956 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
10957 // CHECK11-NEXT: entry:
10958 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
10959 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
10960 // CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
10961 // CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
10962 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 4
10963 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca double**, align 4
10964 // CHECK11-NEXT: [[B_ADDR:%.*]] = alloca double**, align 4
10965 // CHECK11-NEXT: [[C_ADDR:%.*]] = alloca double**, align 4
10966 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
10967 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
10968 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
10969 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
10970 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
10971 // CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
10972 // CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
10973 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10974 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10975 // CHECK11-NEXT: [[I3:%.*]] = alloca i32, align 4
10976 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
10977 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
10978 // CHECK11-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
10979 // CHECK11-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
10980 // CHECK11-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 4
10981 // CHECK11-NEXT: store double** [[A]], double*** [[A_ADDR]], align 4
10982 // CHECK11-NEXT: store double** [[B]], double*** [[B_ADDR]], align 4
10983 // CHECK11-NEXT: store double** [[C]], double*** [[C_ADDR]], align 4
10984 // CHECK11-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
10985 // CHECK11-NEXT: [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
10986 // CHECK11-NEXT: [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
10987 // CHECK11-NEXT: [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
10988 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
10989 // CHECK11-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
10990 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10991 // CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
10992 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
10993 // CHECK11-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
10994 // CHECK11-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
10995 // CHECK11-NEXT: store i32 0, i32* [[I]], align 4
10996 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10997 // CHECK11-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
10998 // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
10999 // CHECK11: omp.precond.then:
11000 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
11001 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11002 // CHECK11-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
11003 // CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
11004 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
11005 // CHECK11-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
11006 // CHECK11-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
11007 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
11008 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
11009 // CHECK11-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
11010 // CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
11011 // CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
11012 // CHECK11-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
11013 // CHECK11-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11014 // CHECK11-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
11015 // CHECK11-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
11016 // CHECK11: cond.true:
11017 // CHECK11-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11018 // CHECK11-NEXT: br label [[COND_END:%.*]]
11019 // CHECK11: cond.false:
11020 // CHECK11-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
11021 // CHECK11-NEXT: br label [[COND_END]]
11022 // CHECK11: cond.end:
11023 // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
11024 // CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
11025 // CHECK11-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
11026 // CHECK11-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
11027 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
11028 // CHECK11: omp.inner.for.cond:
11029 // CHECK11-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
11030 // CHECK11-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !36
11031 // CHECK11-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
11032 // CHECK11-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11033 // CHECK11: omp.inner.for.body:
11034 // CHECK11-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
11035 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
11036 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
11037 // CHECK11-NEXT: store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !36
11038 // CHECK11-NEXT: [[TMP20:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !36
11039 // CHECK11-NEXT: [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !36
11040 // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i32 [[TMP21]]
11041 // CHECK11-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !36
11042 // CHECK11-NEXT: [[TMP23:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !36
11043 // CHECK11-NEXT: [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !36
11044 // CHECK11-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
11045 // CHECK11-NEXT: [[TMP25:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !36
11046 // CHECK11-NEXT: [[ADD7:%.*]] = fadd double [[TMP22]], [[TMP25]]
11047 // CHECK11-NEXT: [[TMP26:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !36
11048 // CHECK11-NEXT: [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !36
11049 // CHECK11-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
11050 // CHECK11-NEXT: store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !36
11051 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
11052 // CHECK11: omp.body.continue:
11053 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
11054 // CHECK11: omp.inner.for.inc:
11055 // CHECK11-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
11056 // CHECK11-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP28]], 1
11057 // CHECK11-NEXT: store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
11058 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP37:![0-9]+]]
11059 // CHECK11: omp.inner.for.end:
11060 // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
11061 // CHECK11: omp.loop.exit:
11062 // CHECK11-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
11063 // CHECK11-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
11064 // CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
11065 // CHECK11-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
11066 // CHECK11-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
11067 // CHECK11-NEXT: br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11068 // CHECK11: .omp.final.then:
11069 // CHECK11-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11070 // CHECK11-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
11071 // CHECK11-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
11072 // CHECK11-NEXT: [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
11073 // CHECK11-NEXT: [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
11074 // CHECK11-NEXT: store i32 [[ADD13]], i32* [[I3]], align 4
11075 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]]
11076 // CHECK11: .omp.final.done:
11077 // CHECK11-NEXT: br label [[OMP_PRECOND_END]]
11078 // CHECK11: omp.precond.end:
11079 // CHECK11-NEXT: ret void
11080 //
11081 //
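// Host stub for the target region at main:477: same shape as the l446 stub but
// without the chunk argument; n, a, b and c are forwarded to @.omp_outlined..10.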
11082 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l477
11083 // CHECK11-SAME: (i32 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR1]] {
11084 // CHECK11-NEXT: entry:
11085 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
11086 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca double*, align 4
11087 // CHECK11-NEXT: [[B_ADDR:%.*]] = alloca double*, align 4
11088 // CHECK11-NEXT: [[C_ADDR:%.*]] = alloca double*, align 4
11089 // CHECK11-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
11090 // CHECK11-NEXT: store double* [[A]], double** [[A_ADDR]], align 4
11091 // CHECK11-NEXT: store double* [[B]], double** [[B_ADDR]], align 4
11092 // CHECK11-NEXT: store double* [[C]], double** [[C_ADDR]], align 4
11093 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
11094 // CHECK11-NEXT: ret void
11095 //
11096 //
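// Teams/distribute outlined function for the l477 region: unchunked distribute
// (schedule kind 92) that forks @.omp_outlined..11 with the combined bounds.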
11097 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..10
11098 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
11099 // CHECK11-NEXT: entry:
11100 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
11101 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
11102 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 4
11103 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca double**, align 4
11104 // CHECK11-NEXT: [[B_ADDR:%.*]] = alloca double**, align 4
11105 // CHECK11-NEXT: [[C_ADDR:%.*]] = alloca double**, align 4
11106 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
11107 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
11108 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
11109 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
11110 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
11111 // CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
11112 // CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
11113 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11114 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11115 // CHECK11-NEXT: [[I3:%.*]] = alloca i32, align 4
11116 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
11117 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
11118 // CHECK11-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 4
11119 // CHECK11-NEXT: store double** [[A]], double*** [[A_ADDR]], align 4
11120 // CHECK11-NEXT: store double** [[B]], double*** [[B_ADDR]], align 4
11121 // CHECK11-NEXT: store double** [[C]], double*** [[C_ADDR]], align 4
11122 // CHECK11-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
11123 // CHECK11-NEXT: [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
11124 // CHECK11-NEXT: [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
11125 // CHECK11-NEXT: [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
11126 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
11127 // CHECK11-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
11128 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11129 // CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
11130 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
11131 // CHECK11-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
11132 // CHECK11-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
11133 // CHECK11-NEXT: store i32 0, i32* [[I]], align 4
11134 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11135 // CHECK11-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
11136 // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
11137 // CHECK11: omp.precond.then:
11138 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
11139 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11140 // CHECK11-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
11141 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
11142 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
11143 // CHECK11-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
11144 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
11145 // CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
11146 // CHECK11-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11147 // CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11148 // CHECK11-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
11149 // CHECK11-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
11150 // CHECK11: cond.true:
11151 // CHECK11-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11152 // CHECK11-NEXT: br label [[COND_END:%.*]]
11153 // CHECK11: cond.false:
11154 // CHECK11-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11155 // CHECK11-NEXT: br label [[COND_END]]
11156 // CHECK11: cond.end:
11157 // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
11158 // CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
11159 // CHECK11-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
11160 // CHECK11-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
11161 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
11162 // CHECK11: omp.inner.for.cond:
11163 // CHECK11-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39
11164 // CHECK11-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !39
11165 // CHECK11-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
11166 // CHECK11-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11167 // CHECK11: omp.inner.for.body:
11168 // CHECK11-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !39
11169 // CHECK11-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !39
11170 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !39
11171 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
11172 // CHECK11: omp.inner.for.inc:
11173 // CHECK11-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39
11174 // CHECK11-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !39
11175 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
11176 // CHECK11-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39
11177 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP40:![0-9]+]]
11178 // CHECK11: omp.inner.for.end:
11179 // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
11180 // CHECK11: omp.loop.exit:
11181 // CHECK11-NEXT: [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
11182 // CHECK11-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
11183 // CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
11184 // CHECK11-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
11185 // CHECK11-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
11186 // CHECK11-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11187 // CHECK11: .omp.final.then:
11188 // CHECK11-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11189 // CHECK11-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
11190 // CHECK11-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
11191 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
11192 // CHECK11-NEXT: [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
11193 // CHECK11-NEXT: store i32 [[ADD8]], i32* [[I3]], align 4
11194 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]]
11195 // CHECK11: .omp.final.done:
11196 // CHECK11-NEXT: br label [[OMP_PRECOND_END]]
11197 // CHECK11: omp.precond.end:
11198 // CHECK11-NEXT: ret void
11199 //
11200 //
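// Parallel worker for the l477 region: statically scheduled (kind 34) loop
// computing a[i] = b[i] + c[i], mirroring @.omp_outlined..7.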
11201 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..11
11202 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
11203 // CHECK11-NEXT: entry:
11204 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
11205 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
11206 // CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
11207 // CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
11208 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 4
11209 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca double**, align 4
11210 // CHECK11-NEXT: [[B_ADDR:%.*]] = alloca double**, align 4
11211 // CHECK11-NEXT: [[C_ADDR:%.*]] = alloca double**, align 4
11212 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
11213 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
11214 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
11215 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
11216 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
11217 // CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
11218 // CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
11219 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11220 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11221 // CHECK11-NEXT: [[I3:%.*]] = alloca i32, align 4
11222 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
11223 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
11224 // CHECK11-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
11225 // CHECK11-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
11226 // CHECK11-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 4
11227 // CHECK11-NEXT: store double** [[A]], double*** [[A_ADDR]], align 4
11228 // CHECK11-NEXT: store double** [[B]], double*** [[B_ADDR]], align 4
11229 // CHECK11-NEXT: store double** [[C]], double*** [[C_ADDR]], align 4
11230 // CHECK11-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
11231 // CHECK11-NEXT: [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
11232 // CHECK11-NEXT: [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
11233 // CHECK11-NEXT: [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
11234 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
11235 // CHECK11-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
11236 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11237 // CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
11238 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
11239 // CHECK11-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
11240 // CHECK11-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
11241 // CHECK11-NEXT: store i32 0, i32* [[I]], align 4
11242 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11243 // CHECK11-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
11244 // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
11245 // CHECK11: omp.precond.then:
11246 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
11247 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11248 // CHECK11-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
11249 // CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
11250 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
11251 // CHECK11-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
11252 // CHECK11-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
11253 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
11254 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
11255 // CHECK11-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
11256 // CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
11257 // CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
11258 // CHECK11-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
11259 // CHECK11-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11260 // CHECK11-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
11261 // CHECK11-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
11262 // CHECK11: cond.true:
11263 // CHECK11-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11264 // CHECK11-NEXT: br label [[COND_END:%.*]]
11265 // CHECK11: cond.false:
11266 // CHECK11-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
11267 // CHECK11-NEXT: br label [[COND_END]]
11268 // CHECK11: cond.end:
11269 // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
11270 // CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
11271 // CHECK11-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
11272 // CHECK11-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
11273 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
11274 // CHECK11: omp.inner.for.cond:
11275 // CHECK11-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !42
11276 // CHECK11-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !42
11277 // CHECK11-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
11278 // CHECK11-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11279 // CHECK11: omp.inner.for.body:
11280 // CHECK11-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !42
11281 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
11282 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
11283 // CHECK11-NEXT: store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !42
11284 // CHECK11-NEXT: [[TMP20:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !42
11285 // CHECK11-NEXT: [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !42
11286 // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i32 [[TMP21]]
11287 // CHECK11-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !42
11288 // CHECK11-NEXT: [[TMP23:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !42
11289 // CHECK11-NEXT: [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !42
11290 // CHECK11-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
11291 // CHECK11-NEXT: [[TMP25:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !42
11292 // CHECK11-NEXT: [[ADD7:%.*]] = fadd double [[TMP22]], [[TMP25]]
11293 // CHECK11-NEXT: [[TMP26:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !42
11294 // CHECK11-NEXT: [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !42
11295 // CHECK11-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
11296 // CHECK11-NEXT: store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !42
11297 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
11298 // CHECK11: omp.body.continue:
11299 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
11300 // CHECK11: omp.inner.for.inc:
11301 // CHECK11-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !42
11302 // CHECK11-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP28]], 1
11303 // CHECK11-NEXT: store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !42
11304 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP43:![0-9]+]]
11305 // CHECK11: omp.inner.for.end:
11306 // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
11307 // CHECK11: omp.loop.exit:
11308 // CHECK11-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
11309 // CHECK11-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
11310 // CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
11311 // CHECK11-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
11312 // CHECK11-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
11313 // CHECK11-NEXT: br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11314 // CHECK11: .omp.final.then:
11315 // CHECK11-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11316 // CHECK11-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
11317 // CHECK11-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
11318 // CHECK11-NEXT: [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
11319 // CHECK11-NEXT: [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
11320 // CHECK11-NEXT: store i32 [[ADD13]], i32* [[I3]], align 4
11321 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]]
11322 // CHECK11: .omp.final.done:
11323 // CHECK11-NEXT: br label [[OMP_PRECOND_END]]
11324 // CHECK11: omp.precond.end:
11325 // CHECK11-NEXT: ret void
11326 //
11327 //
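// Host stub for the target region at main:505: forwards ch, n, a, b and c to
// @.omp_outlined..14 through __kmpc_fork_teams.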
11328 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l505
11329 // CHECK11-SAME: (i32 noundef [[CH:%.*]], i32 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR1]] {
11330 // CHECK11-NEXT: entry:
11331 // CHECK11-NEXT: [[CH_ADDR:%.*]] = alloca i32, align 4
11332 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
11333 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca double*, align 4
11334 // CHECK11-NEXT: [[B_ADDR:%.*]] = alloca double*, align 4
11335 // CHECK11-NEXT: [[C_ADDR:%.*]] = alloca double*, align 4
11336 // CHECK11-NEXT: store i32 [[CH]], i32* [[CH_ADDR]], align 4
11337 // CHECK11-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
11338 // CHECK11-NEXT: store double* [[A]], double** [[A_ADDR]], align 4
11339 // CHECK11-NEXT: store double* [[B]], double** [[B_ADDR]], align 4
11340 // CHECK11-NEXT: store double* [[C]], double** [[C_ADDR]], align 4
11341 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
11342 // CHECK11-NEXT: ret void
11343 //
11344 //
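// Teams/distribute outlined function for the l505 region: the chunk value is
// captured from ch, the distribute loop itself is unchunked (schedule kind 92),
// and the captured chunk is passed on to @.omp_outlined..15 as an extra
// argument.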
11345 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..14
11346 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
11347 // CHECK11-NEXT: entry:
11348 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
11349 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
11350 // CHECK11-NEXT: [[CH_ADDR:%.*]] = alloca i32*, align 4
11351 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 4
11352 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca double**, align 4
11353 // CHECK11-NEXT: [[B_ADDR:%.*]] = alloca double**, align 4
11354 // CHECK11-NEXT: [[C_ADDR:%.*]] = alloca double**, align 4
11355 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
11356 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
11357 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
11358 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
11359 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
11360 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
11361 // CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
11362 // CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
11363 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11364 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11365 // CHECK11-NEXT: [[I4:%.*]] = alloca i32, align 4
11366 // CHECK11-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
11367 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
11368 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
11369 // CHECK11-NEXT: store i32* [[CH]], i32** [[CH_ADDR]], align 4
11370 // CHECK11-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 4
11371 // CHECK11-NEXT: store double** [[A]], double*** [[A_ADDR]], align 4
11372 // CHECK11-NEXT: store double** [[B]], double*** [[B_ADDR]], align 4
11373 // CHECK11-NEXT: store double** [[C]], double*** [[C_ADDR]], align 4
11374 // CHECK11-NEXT: [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
11375 // CHECK11-NEXT: [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
11376 // CHECK11-NEXT: [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 4
11377 // CHECK11-NEXT: [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 4
11378 // CHECK11-NEXT: [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 4
11379 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
11380 // CHECK11-NEXT: store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
11381 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
11382 // CHECK11-NEXT: store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
11383 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11384 // CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
11385 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
11386 // CHECK11-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
11387 // CHECK11-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
11388 // CHECK11-NEXT: store i32 0, i32* [[I]], align 4
11389 // CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11390 // CHECK11-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
11391 // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
11392 // CHECK11: omp.precond.then:
11393 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
11394 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
11395 // CHECK11-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
11396 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
11397 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
11398 // CHECK11-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
11399 // CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
11400 // CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
11401 // CHECK11-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11402 // CHECK11-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
11403 // CHECK11-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
11404 // CHECK11-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
11405 // CHECK11: cond.true:
11406 // CHECK11-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
11407 // CHECK11-NEXT: br label [[COND_END:%.*]]
11408 // CHECK11: cond.false:
11409 // CHECK11-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11410 // CHECK11-NEXT: br label [[COND_END]]
11411 // CHECK11: cond.end:
11412 // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
11413 // CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
11414 // CHECK11-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
11415 // CHECK11-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
11416 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
11417 // CHECK11: omp.inner.for.cond:
11418 // CHECK11-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !45
11419 // CHECK11-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !45
11420 // CHECK11-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
11421 // CHECK11-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11422 // CHECK11: omp.inner.for.body:
11423 // CHECK11-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !45
11424 // CHECK11-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !45
11425 // CHECK11-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !45
11426 // CHECK11-NEXT: store i32 [[TMP21]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !45
11427 // CHECK11-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !45
11428 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**, i32)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]], i32 [[TMP22]]), !llvm.access.group !45
11429 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
11430 // CHECK11: omp.inner.for.inc:
11431 // CHECK11-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !45
11432 // CHECK11-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !45
11433 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
11434 // CHECK11-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !45
11435 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP46:![0-9]+]]
11436 // CHECK11: omp.inner.for.end:
11437 // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
11438 // CHECK11: omp.loop.exit:
11439 // CHECK11-NEXT: [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
11440 // CHECK11-NEXT: [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
11441 // CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
11442 // CHECK11-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
11443 // CHECK11-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
11444 // CHECK11-NEXT: br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11445 // CHECK11: .omp.final.then:
11446 // CHECK11-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11447 // CHECK11-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP29]], 0
11448 // CHECK11-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
11449 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
11450 // CHECK11-NEXT: [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
11451 // CHECK11-NEXT: store i32 [[ADD9]], i32* [[I4]], align 4
11452 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]]
11453 // CHECK11: .omp.final.done:
11454 // CHECK11-NEXT: br label [[OMP_PRECOND_END]]
11455 // CHECK11: omp.precond.end:
11456 // CHECK11-NEXT: ret void
11457 //
11458 //
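// Parallel worker for the l505 region: the forwarded chunk selects a chunked
// static schedule (kind 33), so the worker iterates an omp.dispatch loop over
// its chunks before running the a[i] = b[i] + c[i] body.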
11459 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..15
11460 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
11461 // CHECK11-NEXT: entry:
11462 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
11463 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
11464 // CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
11465 // CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
11466 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 4
11467 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca double**, align 4
11468 // CHECK11-NEXT: [[B_ADDR:%.*]] = alloca double**, align 4
11469 // CHECK11-NEXT: [[C_ADDR:%.*]] = alloca double**, align 4
11470 // CHECK11-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
11471 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
11472 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
11473 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
11474 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
11475 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
11476 // CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
11477 // CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
11478 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11479 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11480 // CHECK11-NEXT: [[I4:%.*]] = alloca i32, align 4
11481 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
11482 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
11483 // CHECK11-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
11484 // CHECK11-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
11485 // CHECK11-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 4
11486 // CHECK11-NEXT: store double** [[A]], double*** [[A_ADDR]], align 4
11487 // CHECK11-NEXT: store double** [[B]], double*** [[B_ADDR]], align 4
11488 // CHECK11-NEXT: store double** [[C]], double*** [[C_ADDR]], align 4
11489 // CHECK11-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
11490 // CHECK11-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
11491 // CHECK11-NEXT: [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
11492 // CHECK11-NEXT: [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
11493 // CHECK11-NEXT: [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
11494 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
11495 // CHECK11-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
11496 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11497 // CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
11498 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
11499 // CHECK11-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
11500 // CHECK11-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
11501 // CHECK11-NEXT: store i32 0, i32* [[I]], align 4
11502 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11503 // CHECK11-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
11504 // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
11505 // CHECK11: omp.precond.then:
11506 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
11507 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
11508 // CHECK11-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
11509 // CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
11510 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
11511 // CHECK11-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
11512 // CHECK11-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
11513 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
11514 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
11515 // CHECK11-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
11516 // CHECK11-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
11517 // CHECK11-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
11518 // CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP12]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP10]])
11519 // CHECK11-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
11520 // CHECK11: omp.dispatch.cond:
11521 // CHECK11-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
11522 // CHECK11-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
11523 // CHECK11-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP13]], [[TMP14]]
11524 // CHECK11-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
11525 // CHECK11: cond.true:
11526 // CHECK11-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
11527 // CHECK11-NEXT: br label [[COND_END:%.*]]
11528 // CHECK11: cond.false:
11529 // CHECK11-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
11530 // CHECK11-NEXT: br label [[COND_END]]
11531 // CHECK11: cond.end:
11532 // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ [[TMP15]], [[COND_TRUE]] ], [ [[TMP16]], [[COND_FALSE]] ]
11533 // CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
11534 // CHECK11-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
11535 // CHECK11-NEXT: store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
11536 // CHECK11-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
11537 // CHECK11-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
11538 // CHECK11-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
11539 // CHECK11-NEXT: br i1 [[CMP6]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
11540 // CHECK11: omp.dispatch.body:
11541 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
11542 // CHECK11: omp.inner.for.cond:
11543 // CHECK11-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !48
11544 // CHECK11-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !48
11545 // CHECK11-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
11546 // CHECK11-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11547 // CHECK11: omp.inner.for.body:
11548 // CHECK11-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !48
11549 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP22]], 1
11550 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
11551 // CHECK11-NEXT: store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !48
11552 // CHECK11-NEXT: [[TMP23:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !48
11553 // CHECK11-NEXT: [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !48
11554 // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
11555 // CHECK11-NEXT: [[TMP25:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !48
11556 // CHECK11-NEXT: [[TMP26:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !48
11557 // CHECK11-NEXT: [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !48
11558 // CHECK11-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
11559 // CHECK11-NEXT: [[TMP28:%.*]] = load double, double* [[ARRAYIDX8]], align 4, !llvm.access.group !48
11560 // CHECK11-NEXT: [[ADD9:%.*]] = fadd double [[TMP25]], [[TMP28]]
11561 // CHECK11-NEXT: [[TMP29:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !48
11562 // CHECK11-NEXT: [[TMP30:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !48
11563 // CHECK11-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds double, double* [[TMP29]], i32 [[TMP30]]
11564 // CHECK11-NEXT: store double [[ADD9]], double* [[ARRAYIDX10]], align 4, !llvm.access.group !48
11565 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
11566 // CHECK11: omp.body.continue:
11567 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
11568 // CHECK11: omp.inner.for.inc:
11569 // CHECK11-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !48
11570 // CHECK11-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP31]], 1
11571 // CHECK11-NEXT: store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !48
11572 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP49:![0-9]+]]
11573 // CHECK11: omp.inner.for.end:
11574 // CHECK11-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
11575 // CHECK11: omp.dispatch.inc:
11576 // CHECK11-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
11577 // CHECK11-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
11578 // CHECK11-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP32]], [[TMP33]]
11579 // CHECK11-NEXT: store i32 [[ADD12]], i32* [[DOTOMP_LB]], align 4
11580 // CHECK11-NEXT: [[TMP34:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
11581 // CHECK11-NEXT: [[TMP35:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
11582 // CHECK11-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP34]], [[TMP35]]
11583 // CHECK11-NEXT: store i32 [[ADD13]], i32* [[DOTOMP_UB]], align 4
11584 // CHECK11-NEXT: br label [[OMP_DISPATCH_COND]]
11585 // CHECK11: omp.dispatch.end:
11586 // CHECK11-NEXT: [[TMP36:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
11587 // CHECK11-NEXT: [[TMP37:%.*]] = load i32, i32* [[TMP36]], align 4
11588 // CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP37]])
11589 // CHECK11-NEXT: [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
11590 // CHECK11-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
11591 // CHECK11-NEXT: br i1 [[TMP39]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11592 // CHECK11: .omp.final.then:
11593 // CHECK11-NEXT: [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11594 // CHECK11-NEXT: [[SUB14:%.*]] = sub nsw i32 [[TMP40]], 0
11595 // CHECK11-NEXT: [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
11596 // CHECK11-NEXT: [[MUL16:%.*]] = mul nsw i32 [[DIV15]], 1
11597 // CHECK11-NEXT: [[ADD17:%.*]] = add nsw i32 0, [[MUL16]]
11598 // CHECK11-NEXT: store i32 [[ADD17]], i32* [[I4]], align 4
11599 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]]
11600 // CHECK11: .omp.final.done:
11601 // CHECK11-NEXT: br label [[OMP_PRECOND_END]]
11602 // CHECK11: omp.precond.end:
11603 // CHECK11-NEXT: ret void
11604 //
11605 //
11606 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l535
11607 // CHECK11-SAME: (i32 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR1]] {
11608 // CHECK11-NEXT: entry:
11609 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
11610 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca double*, align 4
11611 // CHECK11-NEXT: [[B_ADDR:%.*]] = alloca double*, align 4
11612 // CHECK11-NEXT: [[C_ADDR:%.*]] = alloca double*, align 4
11613 // CHECK11-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
11614 // CHECK11-NEXT: store double* [[A]], double** [[A_ADDR]], align 4
11615 // CHECK11-NEXT: store double* [[B]], double** [[B_ADDR]], align 4
11616 // CHECK11-NEXT: store double* [[C]], double** [[C_ADDR]], align 4
11617 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
11618 // CHECK11-NEXT: ret void
11619 //
11620 //
11621 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..18
11622 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
11623 // CHECK11-NEXT: entry:
11624 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
11625 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
11626 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 4
11627 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca double**, align 4
11628 // CHECK11-NEXT: [[B_ADDR:%.*]] = alloca double**, align 4
11629 // CHECK11-NEXT: [[C_ADDR:%.*]] = alloca double**, align 4
11630 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
11631 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
11632 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
11633 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
11634 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
11635 // CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
11636 // CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
11637 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11638 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11639 // CHECK11-NEXT: [[I3:%.*]] = alloca i32, align 4
11640 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
11641 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
11642 // CHECK11-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 4
11643 // CHECK11-NEXT: store double** [[A]], double*** [[A_ADDR]], align 4
11644 // CHECK11-NEXT: store double** [[B]], double*** [[B_ADDR]], align 4
11645 // CHECK11-NEXT: store double** [[C]], double*** [[C_ADDR]], align 4
11646 // CHECK11-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
11647 // CHECK11-NEXT: [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
11648 // CHECK11-NEXT: [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
11649 // CHECK11-NEXT: [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
11650 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
11651 // CHECK11-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
11652 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11653 // CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
11654 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
11655 // CHECK11-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
11656 // CHECK11-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
11657 // CHECK11-NEXT: store i32 0, i32* [[I]], align 4
11658 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11659 // CHECK11-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
11660 // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
11661 // CHECK11: omp.precond.then:
11662 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
11663 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11664 // CHECK11-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
11665 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
11666 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
11667 // CHECK11-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
11668 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
11669 // CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
11670 // CHECK11-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11671 // CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11672 // CHECK11-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
11673 // CHECK11-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
11674 // CHECK11: cond.true:
11675 // CHECK11-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11676 // CHECK11-NEXT: br label [[COND_END:%.*]]
11677 // CHECK11: cond.false:
11678 // CHECK11-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11679 // CHECK11-NEXT: br label [[COND_END]]
11680 // CHECK11: cond.end:
11681 // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
11682 // CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
11683 // CHECK11-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
11684 // CHECK11-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
11685 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
11686 // CHECK11: omp.inner.for.cond:
11687 // CHECK11-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !51
11688 // CHECK11-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !51
11689 // CHECK11-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
11690 // CHECK11-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11691 // CHECK11: omp.inner.for.body:
11692 // CHECK11-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !51
11693 // CHECK11-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !51
11694 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !51
11695 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
11696 // CHECK11: omp.inner.for.inc:
11697 // CHECK11-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !51
11698 // CHECK11-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !51
11699 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
11700 // CHECK11-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !51
11701 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP52:![0-9]+]]
11702 // CHECK11: omp.inner.for.end:
11703 // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
11704 // CHECK11: omp.loop.exit:
11705 // CHECK11-NEXT: [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
11706 // CHECK11-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
11707 // CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
11708 // CHECK11-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
11709 // CHECK11-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
11710 // CHECK11-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11711 // CHECK11: .omp.final.then:
11712 // CHECK11-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11713 // CHECK11-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
11714 // CHECK11-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
11715 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
11716 // CHECK11-NEXT: [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
11717 // CHECK11-NEXT: store i32 [[ADD8]], i32* [[I3]], align 4
11718 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]]
11719 // CHECK11: .omp.final.done:
11720 // CHECK11-NEXT: br label [[OMP_PRECOND_END]]
11721 // CHECK11: omp.precond.end:
11722 // CHECK11-NEXT: ret void
11723 //
11724 //
11725 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..19
11726 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
11727 // CHECK11-NEXT: entry:
11728 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
11729 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
11730 // CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
11731 // CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
11732 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 4
11733 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca double**, align 4
11734 // CHECK11-NEXT: [[B_ADDR:%.*]] = alloca double**, align 4
11735 // CHECK11-NEXT: [[C_ADDR:%.*]] = alloca double**, align 4
11736 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
11737 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
11738 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
11739 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
11740 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
11741 // CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
11742 // CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
11743 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11744 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11745 // CHECK11-NEXT: [[I3:%.*]] = alloca i32, align 4
11746 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
11747 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
11748 // CHECK11-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
11749 // CHECK11-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
11750 // CHECK11-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 4
11751 // CHECK11-NEXT: store double** [[A]], double*** [[A_ADDR]], align 4
11752 // CHECK11-NEXT: store double** [[B]], double*** [[B_ADDR]], align 4
11753 // CHECK11-NEXT: store double** [[C]], double*** [[C_ADDR]], align 4
11754 // CHECK11-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
11755 // CHECK11-NEXT: [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
11756 // CHECK11-NEXT: [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
11757 // CHECK11-NEXT: [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
11758 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
11759 // CHECK11-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
11760 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11761 // CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
11762 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
11763 // CHECK11-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
11764 // CHECK11-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
11765 // CHECK11-NEXT: store i32 0, i32* [[I]], align 4
11766 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11767 // CHECK11-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
11768 // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
11769 // CHECK11: omp.precond.then:
11770 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
11771 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11772 // CHECK11-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
11773 // CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
11774 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
11775 // CHECK11-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
11776 // CHECK11-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
11777 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
11778 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
11779 // CHECK11-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
11780 // CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
11781 // CHECK11-NEXT: [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
11782 // CHECK11-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
11783 // CHECK11-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32 35, i32 [[TMP10]], i32 [[TMP11]], i32 1, i32 1)
11784 // CHECK11-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
11785 // CHECK11: omp.dispatch.cond:
11786 // CHECK11-NEXT: [[TMP14:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
11787 // CHECK11-NEXT: [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4
11788 // CHECK11-NEXT: [[TMP16:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP15]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
11789 // CHECK11-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP16]], 0
11790 // CHECK11-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
11791 // CHECK11: omp.dispatch.body:
11792 // CHECK11-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
11793 // CHECK11-NEXT: store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
11794 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
11795 // CHECK11: omp.inner.for.cond:
11796 // CHECK11-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !54
11797 // CHECK11-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !54
11798 // CHECK11-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
11799 // CHECK11-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11800 // CHECK11: omp.inner.for.body:
11801 // CHECK11-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !54
11802 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP20]], 1
11803 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
11804 // CHECK11-NEXT: store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !54
11805 // CHECK11-NEXT: [[TMP21:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !54
11806 // CHECK11-NEXT: [[TMP22:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !54
11807 // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP21]], i32 [[TMP22]]
11808 // CHECK11-NEXT: [[TMP23:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !54
11809 // CHECK11-NEXT: [[TMP24:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !54
11810 // CHECK11-NEXT: [[TMP25:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !54
11811 // CHECK11-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds double, double* [[TMP24]], i32 [[TMP25]]
11812 // CHECK11-NEXT: [[TMP26:%.*]] = load double, double* [[ARRAYIDX5]], align 4, !llvm.access.group !54
11813 // CHECK11-NEXT: [[ADD6:%.*]] = fadd double [[TMP23]], [[TMP26]]
11814 // CHECK11-NEXT: [[TMP27:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !54
11815 // CHECK11-NEXT: [[TMP28:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !54
11816 // CHECK11-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds double, double* [[TMP27]], i32 [[TMP28]]
11817 // CHECK11-NEXT: store double [[ADD6]], double* [[ARRAYIDX7]], align 4, !llvm.access.group !54
11818 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
11819 // CHECK11: omp.body.continue:
11820 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
11821 // CHECK11: omp.inner.for.inc:
11822 // CHECK11-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !54
11823 // CHECK11-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP29]], 1
11824 // CHECK11-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !54
11825 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP55:![0-9]+]]
11826 // CHECK11: omp.inner.for.end:
11827 // CHECK11-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
11828 // CHECK11: omp.dispatch.inc:
11829 // CHECK11-NEXT: br label [[OMP_DISPATCH_COND]]
11830 // CHECK11: omp.dispatch.end:
11831 // CHECK11-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
11832 // CHECK11-NEXT: [[TMP31:%.*]] = icmp ne i32 [[TMP30]], 0
11833 // CHECK11-NEXT: br i1 [[TMP31]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11834 // CHECK11: .omp.final.then:
11835 // CHECK11-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11836 // CHECK11-NEXT: [[SUB9:%.*]] = sub nsw i32 [[TMP32]], 0
11837 // CHECK11-NEXT: [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
11838 // CHECK11-NEXT: [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
11839 // CHECK11-NEXT: [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
11840 // CHECK11-NEXT: store i32 [[ADD12]], i32* [[I3]], align 4
11841 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]]
11842 // CHECK11: .omp.final.done:
11843 // CHECK11-NEXT: br label [[OMP_PRECOND_END]]
11844 // CHECK11: omp.precond.end:
11845 // CHECK11-NEXT: ret void
11846 //
11847 //
11848 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l561
11849 // CHECK11-SAME: (i32 noundef [[CH:%.*]], i32 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR1]] {
11850 // CHECK11-NEXT: entry:
11851 // CHECK11-NEXT: [[CH_ADDR:%.*]] = alloca i32, align 4
11852 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
11853 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca double*, align 4
11854 // CHECK11-NEXT: [[B_ADDR:%.*]] = alloca double*, align 4
11855 // CHECK11-NEXT: [[C_ADDR:%.*]] = alloca double*, align 4
11856 // CHECK11-NEXT: store i32 [[CH]], i32* [[CH_ADDR]], align 4
11857 // CHECK11-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
11858 // CHECK11-NEXT: store double* [[A]], double** [[A_ADDR]], align 4
11859 // CHECK11-NEXT: store double* [[B]], double** [[B_ADDR]], align 4
11860 // CHECK11-NEXT: store double* [[C]], double** [[C_ADDR]], align 4
11861 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
11862 // CHECK11-NEXT: ret void
11863 //
11864 //
11865 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..22
11866 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
11867 // CHECK11-NEXT: entry:
11868 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
11869 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
11870 // CHECK11-NEXT: [[CH_ADDR:%.*]] = alloca i32*, align 4
11871 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 4
11872 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca double**, align 4
11873 // CHECK11-NEXT: [[B_ADDR:%.*]] = alloca double**, align 4
11874 // CHECK11-NEXT: [[C_ADDR:%.*]] = alloca double**, align 4
11875 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
11876 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
11877 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
11878 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
11879 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
11880 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
11881 // CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
11882 // CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
11883 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11884 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11885 // CHECK11-NEXT: [[I4:%.*]] = alloca i32, align 4
11886 // CHECK11-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
11887 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
11888 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
11889 // CHECK11-NEXT: store i32* [[CH]], i32** [[CH_ADDR]], align 4
11890 // CHECK11-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 4
11891 // CHECK11-NEXT: store double** [[A]], double*** [[A_ADDR]], align 4
11892 // CHECK11-NEXT: store double** [[B]], double*** [[B_ADDR]], align 4
11893 // CHECK11-NEXT: store double** [[C]], double*** [[C_ADDR]], align 4
11894 // CHECK11-NEXT: [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
11895 // CHECK11-NEXT: [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
11896 // CHECK11-NEXT: [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 4
11897 // CHECK11-NEXT: [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 4
11898 // CHECK11-NEXT: [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 4
11899 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
11900 // CHECK11-NEXT: store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
11901 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
11902 // CHECK11-NEXT: store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
11903 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11904 // CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
11905 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
11906 // CHECK11-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
11907 // CHECK11-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
11908 // CHECK11-NEXT: store i32 0, i32* [[I]], align 4
11909 // CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11910 // CHECK11-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
11911 // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
11912 // CHECK11: omp.precond.then:
11913 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
11914 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
11915 // CHECK11-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
11916 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
11917 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
11918 // CHECK11-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
11919 // CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
11920 // CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
11921 // CHECK11-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11922 // CHECK11-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
11923 // CHECK11-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
11924 // CHECK11-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
11925 // CHECK11: cond.true:
11926 // CHECK11-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
11927 // CHECK11-NEXT: br label [[COND_END:%.*]]
11928 // CHECK11: cond.false:
11929 // CHECK11-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11930 // CHECK11-NEXT: br label [[COND_END]]
11931 // CHECK11: cond.end:
11932 // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
11933 // CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
11934 // CHECK11-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
11935 // CHECK11-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
11936 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
11937 // CHECK11: omp.inner.for.cond:
11938 // CHECK11-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !57
11939 // CHECK11-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !57
11940 // CHECK11-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
11941 // CHECK11-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11942 // CHECK11: omp.inner.for.body:
11943 // CHECK11-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !57
11944 // CHECK11-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !57
11945 // CHECK11-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !57
11946 // CHECK11-NEXT: store i32 [[TMP21]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !57
11947 // CHECK11-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !57
11948 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**, i32)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]], i32 [[TMP22]]), !llvm.access.group !57
11949 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
11950 // CHECK11: omp.inner.for.inc:
11951 // CHECK11-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !57
11952 // CHECK11-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !57
11953 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
11954 // CHECK11-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !57
11955 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP58:![0-9]+]]
11956 // CHECK11: omp.inner.for.end:
11957 // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
11958 // CHECK11: omp.loop.exit:
11959 // CHECK11-NEXT: [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
11960 // CHECK11-NEXT: [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
11961 // CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
11962 // CHECK11-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
11963 // CHECK11-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
11964 // CHECK11-NEXT: br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11965 // CHECK11: .omp.final.then:
11966 // CHECK11-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11967 // CHECK11-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP29]], 0
11968 // CHECK11-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
11969 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
11970 // CHECK11-NEXT: [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
11971 // CHECK11-NEXT: store i32 [[ADD9]], i32* [[I4]], align 4
11972 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]]
11973 // CHECK11: .omp.final.done:
11974 // CHECK11-NEXT: br label [[OMP_PRECOND_END]]
11975 // CHECK11: omp.precond.end:
11976 // CHECK11-NEXT: ret void
11977 //
11978 //
11979 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..23
11980 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
11981 // CHECK11-NEXT: entry:
11982 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
11983 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
11984 // CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
11985 // CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
11986 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 4
11987 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca double**, align 4
11988 // CHECK11-NEXT: [[B_ADDR:%.*]] = alloca double**, align 4
11989 // CHECK11-NEXT: [[C_ADDR:%.*]] = alloca double**, align 4
11990 // CHECK11-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
11991 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
11992 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
11993 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
11994 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
11995 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
11996 // CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
11997 // CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
11998 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11999 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12000 // CHECK11-NEXT: [[I4:%.*]] = alloca i32, align 4
12001 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
12002 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
12003 // CHECK11-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
12004 // CHECK11-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
12005 // CHECK11-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 4
12006 // CHECK11-NEXT: store double** [[A]], double*** [[A_ADDR]], align 4
12007 // CHECK11-NEXT: store double** [[B]], double*** [[B_ADDR]], align 4
12008 // CHECK11-NEXT: store double** [[C]], double*** [[C_ADDR]], align 4
12009 // CHECK11-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
12010 // CHECK11-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
12011 // CHECK11-NEXT: [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
12012 // CHECK11-NEXT: [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
12013 // CHECK11-NEXT: [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
12014 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
12015 // CHECK11-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
12016 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12017 // CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
12018 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
12019 // CHECK11-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
12020 // CHECK11-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
12021 // CHECK11-NEXT: store i32 0, i32* [[I]], align 4
12022 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12023 // CHECK11-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
12024 // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
12025 // CHECK11: omp.precond.then:
12026 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
12027 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
12028 // CHECK11-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
12029 // CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
12030 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
12031 // CHECK11-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
12032 // CHECK11-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
12033 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
12034 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
12035 // CHECK11-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
12036 // CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
12037 // CHECK11-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12038 // CHECK11-NEXT: [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
12039 // CHECK11-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
12040 // CHECK11-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32 35, i32 [[TMP11]], i32 [[TMP12]], i32 1, i32 [[TMP10]])
12041 // CHECK11-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
12042 // CHECK11: omp.dispatch.cond:
12043 // CHECK11-NEXT: [[TMP15:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
12044 // CHECK11-NEXT: [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4
12045 // CHECK11-NEXT: [[TMP17:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP16]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
12046 // CHECK11-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP17]], 0
12047 // CHECK11-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
12048 // CHECK11: omp.dispatch.body:
12049 // CHECK11-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
12050 // CHECK11-NEXT: store i32 [[TMP18]], i32* [[DOTOMP_IV]], align 4
12051 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
12052 // CHECK11: omp.inner.for.cond:
12053 // CHECK11-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !60
12054 // CHECK11-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !60
12055 // CHECK11-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
12056 // CHECK11-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12057 // CHECK11: omp.inner.for.body:
12058 // CHECK11-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !60
12059 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP21]], 1
12060 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
12061 // CHECK11-NEXT: store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !60
12062 // CHECK11-NEXT: [[TMP22:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !60
12063 // CHECK11-NEXT: [[TMP23:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !60
12064 // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP22]], i32 [[TMP23]]
12065 // CHECK11-NEXT: [[TMP24:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !60
12066 // CHECK11-NEXT: [[TMP25:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !60
12067 // CHECK11-NEXT: [[TMP26:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !60
12068 // CHECK11-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP25]], i32 [[TMP26]]
12069 // CHECK11-NEXT: [[TMP27:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !60
12070 // CHECK11-NEXT: [[ADD7:%.*]] = fadd double [[TMP24]], [[TMP27]]
12071 // CHECK11-NEXT: [[TMP28:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !60
12072 // CHECK11-NEXT: [[TMP29:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !60
12073 // CHECK11-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP28]], i32 [[TMP29]]
12074 // CHECK11-NEXT: store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !60
12075 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
12076 // CHECK11: omp.body.continue:
12077 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
12078 // CHECK11: omp.inner.for.inc:
12079 // CHECK11-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !60
12080 // CHECK11-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP30]], 1
12081 // CHECK11-NEXT: store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !60
12082 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP61:![0-9]+]]
12083 // CHECK11: omp.inner.for.end:
12084 // CHECK11-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
12085 // CHECK11: omp.dispatch.inc:
12086 // CHECK11-NEXT: br label [[OMP_DISPATCH_COND]]
12087 // CHECK11: omp.dispatch.end:
12088 // CHECK11-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
12089 // CHECK11-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
12090 // CHECK11-NEXT: br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
12091 // CHECK11: .omp.final.then:
12092 // CHECK11-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12093 // CHECK11-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
12094 // CHECK11-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
12095 // CHECK11-NEXT: [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
12096 // CHECK11-NEXT: [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
12097 // CHECK11-NEXT: store i32 [[ADD13]], i32* [[I4]], align 4
12098 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]]
12099 // CHECK11: .omp.final.done:
12100 // CHECK11-NEXT: br label [[OMP_PRECOND_END]]
12101 // CHECK11: omp.precond.end:
12102 // CHECK11-NEXT: ret void
12103 //
12104 //
12105 // CHECK11-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
12106 // CHECK11-SAME: () #[[ATTR3:[0-9]+]] comdat {
12107 // CHECK11-NEXT: entry:
12108 // CHECK11-NEXT: [[A:%.*]] = alloca i32*, align 4
12109 // CHECK11-NEXT: [[B:%.*]] = alloca i32*, align 4
12110 // CHECK11-NEXT: [[C:%.*]] = alloca i32*, align 4
12111 // CHECK11-NEXT: [[N:%.*]] = alloca i32, align 4
12112 // CHECK11-NEXT: [[CH:%.*]] = alloca i32, align 4
12113 // CHECK11-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
12114 // CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 4
12115 // CHECK11-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 4
12116 // CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 4
12117 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
12118 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
12119 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
12120 // CHECK11-NEXT: [[N_CASTED3:%.*]] = alloca i32, align 4
12121 // CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [4 x i8*], align 4
12122 // CHECK11-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [4 x i8*], align 4
12123 // CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [4 x i8*], align 4
12124 // CHECK11-NEXT: [[_TMP7:%.*]] = alloca i32, align 4
12125 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_8:%.*]] = alloca i32, align 4
12126 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4
12127 // CHECK11-NEXT: [[CH_CASTED:%.*]] = alloca i32, align 4
12128 // CHECK11-NEXT: [[N_CASTED17:%.*]] = alloca i32, align 4
12129 // CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS18:%.*]] = alloca [5 x i8*], align 4
12130 // CHECK11-NEXT: [[DOTOFFLOAD_PTRS19:%.*]] = alloca [5 x i8*], align 4
12131 // CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS20:%.*]] = alloca [5 x i8*], align 4
12132 // CHECK11-NEXT: [[_TMP21:%.*]] = alloca i32, align 4
12133 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_22:%.*]] = alloca i32, align 4
12134 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_23:%.*]] = alloca i32, align 4
12135 // CHECK11-NEXT: [[N_CASTED31:%.*]] = alloca i32, align 4
12136 // CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS32:%.*]] = alloca [4 x i8*], align 4
12137 // CHECK11-NEXT: [[DOTOFFLOAD_PTRS33:%.*]] = alloca [4 x i8*], align 4
12138 // CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS34:%.*]] = alloca [4 x i8*], align 4
12139 // CHECK11-NEXT: [[_TMP35:%.*]] = alloca i32, align 4
12140 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_36:%.*]] = alloca i32, align 4
12141 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_37:%.*]] = alloca i32, align 4
12142 // CHECK11-NEXT: [[CH_CASTED45:%.*]] = alloca i32, align 4
12143 // CHECK11-NEXT: [[N_CASTED46:%.*]] = alloca i32, align 4
12144 // CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS47:%.*]] = alloca [5 x i8*], align 4
12145 // CHECK11-NEXT: [[DOTOFFLOAD_PTRS48:%.*]] = alloca [5 x i8*], align 4
12146 // CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS49:%.*]] = alloca [5 x i8*], align 4
12147 // CHECK11-NEXT: [[_TMP50:%.*]] = alloca i32, align 4
12148 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_51:%.*]] = alloca i32, align 4
12149 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_52:%.*]] = alloca i32, align 4
12150 // CHECK11-NEXT: [[N_CASTED60:%.*]] = alloca i32, align 4
12151 // CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS61:%.*]] = alloca [4 x i8*], align 4
12152 // CHECK11-NEXT: [[DOTOFFLOAD_PTRS62:%.*]] = alloca [4 x i8*], align 4
12153 // CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS63:%.*]] = alloca [4 x i8*], align 4
12154 // CHECK11-NEXT: [[_TMP64:%.*]] = alloca i32, align 4
12155 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_65:%.*]] = alloca i32, align 4
12156 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_66:%.*]] = alloca i32, align 4
12157 // CHECK11-NEXT: [[CH_CASTED74:%.*]] = alloca i32, align 4
12158 // CHECK11-NEXT: [[N_CASTED75:%.*]] = alloca i32, align 4
12159 // CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS76:%.*]] = alloca [5 x i8*], align 4
12160 // CHECK11-NEXT: [[DOTOFFLOAD_PTRS77:%.*]] = alloca [5 x i8*], align 4
12161 // CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS78:%.*]] = alloca [5 x i8*], align 4
12162 // CHECK11-NEXT: [[_TMP79:%.*]] = alloca i32, align 4
12163 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_80:%.*]] = alloca i32, align 4
12164 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_81:%.*]] = alloca i32, align 4
12165 // CHECK11-NEXT: store i32 10000, i32* [[N]], align 4
12166 // CHECK11-NEXT: store i32 100, i32* [[CH]], align 4
12167 // CHECK11-NEXT: [[TMP0:%.*]] = load i32, i32* [[N]], align 4
12168 // CHECK11-NEXT: store i32 [[TMP0]], i32* [[N_CASTED]], align 4
12169 // CHECK11-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_CASTED]], align 4
12170 // CHECK11-NEXT: [[TMP2:%.*]] = load i32*, i32** [[A]], align 4
12171 // CHECK11-NEXT: [[TMP3:%.*]] = load i32*, i32** [[B]], align 4
12172 // CHECK11-NEXT: [[TMP4:%.*]] = load i32*, i32** [[C]], align 4
12173 // CHECK11-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
12174 // CHECK11-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32*
12175 // CHECK11-NEXT: store i32 [[TMP1]], i32* [[TMP6]], align 4
12176 // CHECK11-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
12177 // CHECK11-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32*
12178 // CHECK11-NEXT: store i32 [[TMP1]], i32* [[TMP8]], align 4
12179 // CHECK11-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
12180 // CHECK11-NEXT: store i8* null, i8** [[TMP9]], align 4
12181 // CHECK11-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
12182 // CHECK11-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i32**
12183 // CHECK11-NEXT: store i32* [[TMP2]], i32** [[TMP11]], align 4
12184 // CHECK11-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
12185 // CHECK11-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32**
12186 // CHECK11-NEXT: store i32* [[TMP2]], i32** [[TMP13]], align 4
12187 // CHECK11-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
12188 // CHECK11-NEXT: store i8* null, i8** [[TMP14]], align 4
12189 // CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
12190 // CHECK11-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32**
12191 // CHECK11-NEXT: store i32* [[TMP3]], i32** [[TMP16]], align 4
12192 // CHECK11-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
12193 // CHECK11-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32**
12194 // CHECK11-NEXT: store i32* [[TMP3]], i32** [[TMP18]], align 4
12195 // CHECK11-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
12196 // CHECK11-NEXT: store i8* null, i8** [[TMP19]], align 4
12197 // CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
12198 // CHECK11-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32**
12199 // CHECK11-NEXT: store i32* [[TMP4]], i32** [[TMP21]], align 4
12200 // CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
12201 // CHECK11-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32**
12202 // CHECK11-NEXT: store i32* [[TMP4]], i32** [[TMP23]], align 4
12203 // CHECK11-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
12204 // CHECK11-NEXT: store i8* null, i8** [[TMP24]], align 4
12205 // CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
12206 // CHECK11-NEXT: [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
12207 // CHECK11-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4
12208 // CHECK11-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4
12209 // CHECK11-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12210 // CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0
12211 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
12212 // CHECK11-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
12213 // CHECK11-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
12214 // CHECK11-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12215 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1
12216 // CHECK11-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64
12217 // CHECK11-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
12218 // CHECK11-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
12219 // CHECK11-NEXT: store i32 1, i32* [[TMP31]], align 4
12220 // CHECK11-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
12221 // CHECK11-NEXT: store i32 4, i32* [[TMP32]], align 4
12222 // CHECK11-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
12223 // CHECK11-NEXT: store i8** [[TMP25]], i8*** [[TMP33]], align 4
12224 // CHECK11-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
12225 // CHECK11-NEXT: store i8** [[TMP26]], i8*** [[TMP34]], align 4
12226 // CHECK11-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
12227 // CHECK11-NEXT: store i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.28, i32 0, i32 0), i64** [[TMP35]], align 4
12228 // CHECK11-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
12229 // CHECK11-NEXT: store i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.29, i32 0, i32 0), i64** [[TMP36]], align 4
12230 // CHECK11-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
12231 // CHECK11-NEXT: store i8** null, i8*** [[TMP37]], align 4
12232 // CHECK11-NEXT: [[TMP38:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
12233 // CHECK11-NEXT: store i8** null, i8*** [[TMP38]], align 4
12234 // CHECK11-NEXT: [[TMP39:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
12235 // CHECK11-NEXT: store i64 [[TMP30]], i64* [[TMP39]], align 8
12236 // CHECK11-NEXT: [[TMP40:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB3]], i64 -1, i32 0, i32 1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l42.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
12237 // CHECK11-NEXT: [[TMP41:%.*]] = icmp ne i32 [[TMP40]], 0
12238 // CHECK11-NEXT: br i1 [[TMP41]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
12239 // CHECK11: omp_offload.failed:
12240 // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l42(i32 [[TMP1]], i32* [[TMP2]], i32* [[TMP3]], i32* [[TMP4]]) #[[ATTR2]]
12241 // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]]
12242 // CHECK11: omp_offload.cont:
12243 // CHECK11-NEXT: [[TMP42:%.*]] = load i32, i32* [[N]], align 4
12244 // CHECK11-NEXT: store i32 [[TMP42]], i32* [[N_CASTED3]], align 4
12245 // CHECK11-NEXT: [[TMP43:%.*]] = load i32, i32* [[N_CASTED3]], align 4
12246 // CHECK11-NEXT: [[TMP44:%.*]] = load i32*, i32** [[A]], align 4
12247 // CHECK11-NEXT: [[TMP45:%.*]] = load i32*, i32** [[B]], align 4
12248 // CHECK11-NEXT: [[TMP46:%.*]] = load i32*, i32** [[C]], align 4
12249 // CHECK11-NEXT: [[TMP47:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
12250 // CHECK11-NEXT: [[TMP48:%.*]] = bitcast i8** [[TMP47]] to i32*
12251 // CHECK11-NEXT: store i32 [[TMP43]], i32* [[TMP48]], align 4
12252 // CHECK11-NEXT: [[TMP49:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
12253 // CHECK11-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i32*
12254 // CHECK11-NEXT: store i32 [[TMP43]], i32* [[TMP50]], align 4
12255 // CHECK11-NEXT: [[TMP51:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0
12256 // CHECK11-NEXT: store i8* null, i8** [[TMP51]], align 4
12257 // CHECK11-NEXT: [[TMP52:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1
12258 // CHECK11-NEXT: [[TMP53:%.*]] = bitcast i8** [[TMP52]] to i32**
12259 // CHECK11-NEXT: store i32* [[TMP44]], i32** [[TMP53]], align 4
12260 // CHECK11-NEXT: [[TMP54:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1
12261 // CHECK11-NEXT: [[TMP55:%.*]] = bitcast i8** [[TMP54]] to i32**
12262 // CHECK11-NEXT: store i32* [[TMP44]], i32** [[TMP55]], align 4
12263 // CHECK11-NEXT: [[TMP56:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1
12264 // CHECK11-NEXT: store i8* null, i8** [[TMP56]], align 4
12265 // CHECK11-NEXT: [[TMP57:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2
12266 // CHECK11-NEXT: [[TMP58:%.*]] = bitcast i8** [[TMP57]] to i32**
12267 // CHECK11-NEXT: store i32* [[TMP45]], i32** [[TMP58]], align 4
12268 // CHECK11-NEXT: [[TMP59:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2
12269 // CHECK11-NEXT: [[TMP60:%.*]] = bitcast i8** [[TMP59]] to i32**
12270 // CHECK11-NEXT: store i32* [[TMP45]], i32** [[TMP60]], align 4
12271 // CHECK11-NEXT: [[TMP61:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2
12272 // CHECK11-NEXT: store i8* null, i8** [[TMP61]], align 4
12273 // CHECK11-NEXT: [[TMP62:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 3
12274 // CHECK11-NEXT: [[TMP63:%.*]] = bitcast i8** [[TMP62]] to i32**
12275 // CHECK11-NEXT: store i32* [[TMP46]], i32** [[TMP63]], align 4
12276 // CHECK11-NEXT: [[TMP64:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 3
12277 // CHECK11-NEXT: [[TMP65:%.*]] = bitcast i8** [[TMP64]] to i32**
12278 // CHECK11-NEXT: store i32* [[TMP46]], i32** [[TMP65]], align 4
12279 // CHECK11-NEXT: [[TMP66:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 3
12280 // CHECK11-NEXT: store i8* null, i8** [[TMP66]], align 4
12281 // CHECK11-NEXT: [[TMP67:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
12282 // CHECK11-NEXT: [[TMP68:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
12283 // CHECK11-NEXT: [[TMP69:%.*]] = load i32, i32* [[N]], align 4
12284 // CHECK11-NEXT: store i32 [[TMP69]], i32* [[DOTCAPTURE_EXPR_8]], align 4
12285 // CHECK11-NEXT: [[TMP70:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_8]], align 4
12286 // CHECK11-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP70]], 0
12287 // CHECK11-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
12288 // CHECK11-NEXT: [[SUB12:%.*]] = sub nsw i32 [[DIV11]], 1
12289 // CHECK11-NEXT: store i32 [[SUB12]], i32* [[DOTCAPTURE_EXPR_9]], align 4
12290 // CHECK11-NEXT: [[TMP71:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4
12291 // CHECK11-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP71]], 1
12292 // CHECK11-NEXT: [[TMP72:%.*]] = zext i32 [[ADD13]] to i64
12293 // CHECK11-NEXT: [[KERNEL_ARGS14:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
12294 // CHECK11-NEXT: [[TMP73:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS14]], i32 0, i32 0
12295 // CHECK11-NEXT: store i32 1, i32* [[TMP73]], align 4
12296 // CHECK11-NEXT: [[TMP74:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS14]], i32 0, i32 1
12297 // CHECK11-NEXT: store i32 4, i32* [[TMP74]], align 4
12298 // CHECK11-NEXT: [[TMP75:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS14]], i32 0, i32 2
12299 // CHECK11-NEXT: store i8** [[TMP67]], i8*** [[TMP75]], align 4
12300 // CHECK11-NEXT: [[TMP76:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS14]], i32 0, i32 3
12301 // CHECK11-NEXT: store i8** [[TMP68]], i8*** [[TMP76]], align 4
12302 // CHECK11-NEXT: [[TMP77:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS14]], i32 0, i32 4
12303 // CHECK11-NEXT: store i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.32, i32 0, i32 0), i64** [[TMP77]], align 4
12304 // CHECK11-NEXT: [[TMP78:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS14]], i32 0, i32 5
12305 // CHECK11-NEXT: store i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.33, i32 0, i32 0), i64** [[TMP78]], align 4
12306 // CHECK11-NEXT: [[TMP79:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS14]], i32 0, i32 6
12307 // CHECK11-NEXT: store i8** null, i8*** [[TMP79]], align 4
12308 // CHECK11-NEXT: [[TMP80:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS14]], i32 0, i32 7
12309 // CHECK11-NEXT: store i8** null, i8*** [[TMP80]], align 4
12310 // CHECK11-NEXT: [[TMP81:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS14]], i32 0, i32 8
12311 // CHECK11-NEXT: store i64 [[TMP72]], i64* [[TMP81]], align 8
12312 // CHECK11-NEXT: [[TMP82:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB3]], i64 -1, i32 0, i32 1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l50.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS14]])
12313 // CHECK11-NEXT: [[TMP83:%.*]] = icmp ne i32 [[TMP82]], 0
12314 // CHECK11-NEXT: br i1 [[TMP83]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]]
12315 // CHECK11: omp_offload.failed15:
12316 // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l50(i32 [[TMP43]], i32* [[TMP44]], i32* [[TMP45]], i32* [[TMP46]]) #[[ATTR2]]
12317 // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT16]]
12318 // CHECK11: omp_offload.cont16:
12319 // CHECK11-NEXT: [[TMP84:%.*]] = load i32, i32* [[CH]], align 4
12320 // CHECK11-NEXT: store i32 [[TMP84]], i32* [[CH_CASTED]], align 4
12321 // CHECK11-NEXT: [[TMP85:%.*]] = load i32, i32* [[CH_CASTED]], align 4
12322 // CHECK11-NEXT: [[TMP86:%.*]] = load i32, i32* [[N]], align 4
12323 // CHECK11-NEXT: store i32 [[TMP86]], i32* [[N_CASTED17]], align 4
12324 // CHECK11-NEXT: [[TMP87:%.*]] = load i32, i32* [[N_CASTED17]], align 4
12325 // CHECK11-NEXT: [[TMP88:%.*]] = load i32*, i32** [[A]], align 4
12326 // CHECK11-NEXT: [[TMP89:%.*]] = load i32*, i32** [[B]], align 4
12327 // CHECK11-NEXT: [[TMP90:%.*]] = load i32*, i32** [[C]], align 4
12328 // CHECK11-NEXT: [[TMP91:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0
12329 // CHECK11-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to i32*
12330 // CHECK11-NEXT: store i32 [[TMP85]], i32* [[TMP92]], align 4
12331 // CHECK11-NEXT: [[TMP93:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0
12332 // CHECK11-NEXT: [[TMP94:%.*]] = bitcast i8** [[TMP93]] to i32*
12333 // CHECK11-NEXT: store i32 [[TMP85]], i32* [[TMP94]], align 4
12334 // CHECK11-NEXT: [[TMP95:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 0
12335 // CHECK11-NEXT: store i8* null, i8** [[TMP95]], align 4
12336 // CHECK11-NEXT: [[TMP96:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 1
12337 // CHECK11-NEXT: [[TMP97:%.*]] = bitcast i8** [[TMP96]] to i32*
12338 // CHECK11-NEXT: store i32 [[TMP87]], i32* [[TMP97]], align 4
12339 // CHECK11-NEXT: [[TMP98:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 1
12340 // CHECK11-NEXT: [[TMP99:%.*]] = bitcast i8** [[TMP98]] to i32*
12341 // CHECK11-NEXT: store i32 [[TMP87]], i32* [[TMP99]], align 4
12342 // CHECK11-NEXT: [[TMP100:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 1
12343 // CHECK11-NEXT: store i8* null, i8** [[TMP100]], align 4
12344 // CHECK11-NEXT: [[TMP101:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 2
12345 // CHECK11-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to i32**
12346 // CHECK11-NEXT: store i32* [[TMP88]], i32** [[TMP102]], align 4
12347 // CHECK11-NEXT: [[TMP103:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 2
12348 // CHECK11-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to i32**
12349 // CHECK11-NEXT: store i32* [[TMP88]], i32** [[TMP104]], align 4
12350 // CHECK11-NEXT: [[TMP105:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 2
12351 // CHECK11-NEXT: store i8* null, i8** [[TMP105]], align 4
12352 // CHECK11-NEXT: [[TMP106:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 3
12353 // CHECK11-NEXT: [[TMP107:%.*]] = bitcast i8** [[TMP106]] to i32**
12354 // CHECK11-NEXT: store i32* [[TMP89]], i32** [[TMP107]], align 4
12355 // CHECK11-NEXT: [[TMP108:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 3
12356 // CHECK11-NEXT: [[TMP109:%.*]] = bitcast i8** [[TMP108]] to i32**
12357 // CHECK11-NEXT: store i32* [[TMP89]], i32** [[TMP109]], align 4
12358 // CHECK11-NEXT: [[TMP110:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 3
12359 // CHECK11-NEXT: store i8* null, i8** [[TMP110]], align 4
12360 // CHECK11-NEXT: [[TMP111:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 4
12361 // CHECK11-NEXT: [[TMP112:%.*]] = bitcast i8** [[TMP111]] to i32**
12362 // CHECK11-NEXT: store i32* [[TMP90]], i32** [[TMP112]], align 4
12363 // CHECK11-NEXT: [[TMP113:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 4
12364 // CHECK11-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i32**
12365 // CHECK11-NEXT: store i32* [[TMP90]], i32** [[TMP114]], align 4
12366 // CHECK11-NEXT: [[TMP115:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 4
12367 // CHECK11-NEXT: store i8* null, i8** [[TMP115]], align 4
12368 // CHECK11-NEXT: [[TMP116:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0
12369 // CHECK11-NEXT: [[TMP117:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0
12370 // CHECK11-NEXT: [[TMP118:%.*]] = load i32, i32* [[N]], align 4
12371 // CHECK11-NEXT: store i32 [[TMP118]], i32* [[DOTCAPTURE_EXPR_22]], align 4
12372 // CHECK11-NEXT: [[TMP119:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_22]], align 4
12373 // CHECK11-NEXT: [[SUB24:%.*]] = sub nsw i32 [[TMP119]], 0
12374 // CHECK11-NEXT: [[DIV25:%.*]] = sdiv i32 [[SUB24]], 1
12375 // CHECK11-NEXT: [[SUB26:%.*]] = sub nsw i32 [[DIV25]], 1
12376 // CHECK11-NEXT: store i32 [[SUB26]], i32* [[DOTCAPTURE_EXPR_23]], align 4
12377 // CHECK11-NEXT: [[TMP120:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_23]], align 4
12378 // CHECK11-NEXT: [[ADD27:%.*]] = add nsw i32 [[TMP120]], 1
12379 // CHECK11-NEXT: [[TMP121:%.*]] = zext i32 [[ADD27]] to i64
12380 // CHECK11-NEXT: [[KERNEL_ARGS28:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
12381 // CHECK11-NEXT: [[TMP122:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS28]], i32 0, i32 0
12382 // CHECK11-NEXT: store i32 1, i32* [[TMP122]], align 4
12383 // CHECK11-NEXT: [[TMP123:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS28]], i32 0, i32 1
12384 // CHECK11-NEXT: store i32 5, i32* [[TMP123]], align 4
12385 // CHECK11-NEXT: [[TMP124:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS28]], i32 0, i32 2
12386 // CHECK11-NEXT: store i8** [[TMP116]], i8*** [[TMP124]], align 4
12387 // CHECK11-NEXT: [[TMP125:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS28]], i32 0, i32 3
12388 // CHECK11-NEXT: store i8** [[TMP117]], i8*** [[TMP125]], align 4
12389 // CHECK11-NEXT: [[TMP126:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS28]], i32 0, i32 4
12390 // CHECK11-NEXT: store i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.36, i32 0, i32 0), i64** [[TMP126]], align 4
12391 // CHECK11-NEXT: [[TMP127:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS28]], i32 0, i32 5
12392 // CHECK11-NEXT: store i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.37, i32 0, i32 0), i64** [[TMP127]], align 4
12393 // CHECK11-NEXT: [[TMP128:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS28]], i32 0, i32 6
12394 // CHECK11-NEXT: store i8** null, i8*** [[TMP128]], align 4
12395 // CHECK11-NEXT: [[TMP129:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS28]], i32 0, i32 7
12396 // CHECK11-NEXT: store i8** null, i8*** [[TMP129]], align 4
12397 // CHECK11-NEXT: [[TMP130:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS28]], i32 0, i32 8
12398 // CHECK11-NEXT: store i64 [[TMP121]], i64* [[TMP130]], align 8
12399 // CHECK11-NEXT: [[TMP131:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB3]], i64 -1, i32 0, i32 1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l58.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS28]])
12400 // CHECK11-NEXT: [[TMP132:%.*]] = icmp ne i32 [[TMP131]], 0
12401 // CHECK11-NEXT: br i1 [[TMP132]], label [[OMP_OFFLOAD_FAILED29:%.*]], label [[OMP_OFFLOAD_CONT30:%.*]]
12402 // CHECK11: omp_offload.failed29:
12403 // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l58(i32 [[TMP85]], i32 [[TMP87]], i32* [[TMP88]], i32* [[TMP89]], i32* [[TMP90]]) #[[ATTR2]]
12404 // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT30]]
12405 // CHECK11: omp_offload.cont30:
12406 // CHECK11-NEXT: [[TMP133:%.*]] = load i32, i32* [[N]], align 4
12407 // CHECK11-NEXT: store i32 [[TMP133]], i32* [[N_CASTED31]], align 4
12408 // CHECK11-NEXT: [[TMP134:%.*]] = load i32, i32* [[N_CASTED31]], align 4
12409 // CHECK11-NEXT: [[TMP135:%.*]] = load i32*, i32** [[A]], align 4
12410 // CHECK11-NEXT: [[TMP136:%.*]] = load i32*, i32** [[B]], align 4
12411 // CHECK11-NEXT: [[TMP137:%.*]] = load i32*, i32** [[C]], align 4
12412 // CHECK11-NEXT: [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS32]], i32 0, i32 0
12413 // CHECK11-NEXT: [[TMP139:%.*]] = bitcast i8** [[TMP138]] to i32*
12414 // CHECK11-NEXT: store i32 [[TMP134]], i32* [[TMP139]], align 4
12415 // CHECK11-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS33]], i32 0, i32 0
12416 // CHECK11-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to i32*
12417 // CHECK11-NEXT: store i32 [[TMP134]], i32* [[TMP141]], align 4
12418 // CHECK11-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS34]], i32 0, i32 0
12419 // CHECK11-NEXT: store i8* null, i8** [[TMP142]], align 4
12420 // CHECK11-NEXT: [[TMP143:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS32]], i32 0, i32 1
12421 // CHECK11-NEXT: [[TMP144:%.*]] = bitcast i8** [[TMP143]] to i32**
12422 // CHECK11-NEXT: store i32* [[TMP135]], i32** [[TMP144]], align 4
12423 // CHECK11-NEXT: [[TMP145:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS33]], i32 0, i32 1
12424 // CHECK11-NEXT: [[TMP146:%.*]] = bitcast i8** [[TMP145]] to i32**
12425 // CHECK11-NEXT: store i32* [[TMP135]], i32** [[TMP146]], align 4
12426 // CHECK11-NEXT: [[TMP147:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS34]], i32 0, i32 1
12427 // CHECK11-NEXT: store i8* null, i8** [[TMP147]], align 4
12428 // CHECK11-NEXT: [[TMP148:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS32]], i32 0, i32 2
12429 // CHECK11-NEXT: [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i32**
12430 // CHECK11-NEXT: store i32* [[TMP136]], i32** [[TMP149]], align 4
12431 // CHECK11-NEXT: [[TMP150:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS33]], i32 0, i32 2
12432 // CHECK11-NEXT: [[TMP151:%.*]] = bitcast i8** [[TMP150]] to i32**
12433 // CHECK11-NEXT: store i32* [[TMP136]], i32** [[TMP151]], align 4
12434 // CHECK11-NEXT: [[TMP152:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS34]], i32 0, i32 2
12435 // CHECK11-NEXT: store i8* null, i8** [[TMP152]], align 4
12436 // CHECK11-NEXT: [[TMP153:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS32]], i32 0, i32 3
12437 // CHECK11-NEXT: [[TMP154:%.*]] = bitcast i8** [[TMP153]] to i32**
12438 // CHECK11-NEXT: store i32* [[TMP137]], i32** [[TMP154]], align 4
12439 // CHECK11-NEXT: [[TMP155:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS33]], i32 0, i32 3
12440 // CHECK11-NEXT: [[TMP156:%.*]] = bitcast i8** [[TMP155]] to i32**
12441 // CHECK11-NEXT: store i32* [[TMP137]], i32** [[TMP156]], align 4
12442 // CHECK11-NEXT: [[TMP157:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS34]], i32 0, i32 3
12443 // CHECK11-NEXT: store i8* null, i8** [[TMP157]], align 4
12444 // CHECK11-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS32]], i32 0, i32 0
12445 // CHECK11-NEXT: [[TMP159:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS33]], i32 0, i32 0
12446 // CHECK11-NEXT: [[TMP160:%.*]] = load i32, i32* [[N]], align 4
12447 // CHECK11-NEXT: store i32 [[TMP160]], i32* [[DOTCAPTURE_EXPR_36]], align 4
12448 // CHECK11-NEXT: [[TMP161:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_36]], align 4
12449 // CHECK11-NEXT: [[SUB38:%.*]] = sub nsw i32 [[TMP161]], 0
12450 // CHECK11-NEXT: [[DIV39:%.*]] = sdiv i32 [[SUB38]], 1
12451 // CHECK11-NEXT: [[SUB40:%.*]] = sub nsw i32 [[DIV39]], 1
12452 // CHECK11-NEXT: store i32 [[SUB40]], i32* [[DOTCAPTURE_EXPR_37]], align 4
12453 // CHECK11-NEXT: [[TMP162:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_37]], align 4
12454 // CHECK11-NEXT: [[ADD41:%.*]] = add nsw i32 [[TMP162]], 1
12455 // CHECK11-NEXT: [[TMP163:%.*]] = zext i32 [[ADD41]] to i64
12456 // CHECK11-NEXT: [[KERNEL_ARGS42:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
12457 // CHECK11-NEXT: [[TMP164:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS42]], i32 0, i32 0
12458 // CHECK11-NEXT: store i32 1, i32* [[TMP164]], align 4
12459 // CHECK11-NEXT: [[TMP165:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS42]], i32 0, i32 1
12460 // CHECK11-NEXT: store i32 4, i32* [[TMP165]], align 4
12461 // CHECK11-NEXT: [[TMP166:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS42]], i32 0, i32 2
12462 // CHECK11-NEXT: store i8** [[TMP158]], i8*** [[TMP166]], align 4
12463 // CHECK11-NEXT: [[TMP167:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS42]], i32 0, i32 3
12464 // CHECK11-NEXT: store i8** [[TMP159]], i8*** [[TMP167]], align 4
12465 // CHECK11-NEXT: [[TMP168:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS42]], i32 0, i32 4
12466 // CHECK11-NEXT: store i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.40, i32 0, i32 0), i64** [[TMP168]], align 4
12467 // CHECK11-NEXT: [[TMP169:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS42]], i32 0, i32 5
12468 // CHECK11-NEXT: store i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.41, i32 0, i32 0), i64** [[TMP169]], align 4
12469 // CHECK11-NEXT: [[TMP170:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS42]], i32 0, i32 6
12470 // CHECK11-NEXT: store i8** null, i8*** [[TMP170]], align 4
12471 // CHECK11-NEXT: [[TMP171:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS42]], i32 0, i32 7
12472 // CHECK11-NEXT: store i8** null, i8*** [[TMP171]], align 4
12473 // CHECK11-NEXT: [[TMP172:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS42]], i32 0, i32 8
12474 // CHECK11-NEXT: store i64 [[TMP163]], i64* [[TMP172]], align 8
12475 // CHECK11-NEXT: [[TMP173:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB3]], i64 -1, i32 0, i32 1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l66.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS42]])
12476 // CHECK11-NEXT: [[TMP174:%.*]] = icmp ne i32 [[TMP173]], 0
12477 // CHECK11-NEXT: br i1 [[TMP174]], label [[OMP_OFFLOAD_FAILED43:%.*]], label [[OMP_OFFLOAD_CONT44:%.*]]
12478 // CHECK11: omp_offload.failed43:
12479 // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l66(i32 [[TMP134]], i32* [[TMP135]], i32* [[TMP136]], i32* [[TMP137]]) #[[ATTR2]]
12480 // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT44]]
12481 // CHECK11: omp_offload.cont44:
12482 // CHECK11-NEXT: [[TMP175:%.*]] = load i32, i32* [[CH]], align 4
12483 // CHECK11-NEXT: store i32 [[TMP175]], i32* [[CH_CASTED45]], align 4
12484 // CHECK11-NEXT: [[TMP176:%.*]] = load i32, i32* [[CH_CASTED45]], align 4
12485 // CHECK11-NEXT: [[TMP177:%.*]] = load i32, i32* [[N]], align 4
12486 // CHECK11-NEXT: store i32 [[TMP177]], i32* [[N_CASTED46]], align 4
12487 // CHECK11-NEXT: [[TMP178:%.*]] = load i32, i32* [[N_CASTED46]], align 4
12488 // CHECK11-NEXT: [[TMP179:%.*]] = load i32*, i32** [[A]], align 4
12489 // CHECK11-NEXT: [[TMP180:%.*]] = load i32*, i32** [[B]], align 4
12490 // CHECK11-NEXT: [[TMP181:%.*]] = load i32*, i32** [[C]], align 4
12491 // CHECK11-NEXT: [[TMP182:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 0
12492 // CHECK11-NEXT: [[TMP183:%.*]] = bitcast i8** [[TMP182]] to i32*
12493 // CHECK11-NEXT: store i32 [[TMP176]], i32* [[TMP183]], align 4
12494 // CHECK11-NEXT: [[TMP184:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 0
12495 // CHECK11-NEXT: [[TMP185:%.*]] = bitcast i8** [[TMP184]] to i32*
12496 // CHECK11-NEXT: store i32 [[TMP176]], i32* [[TMP185]], align 4
12497 // CHECK11-NEXT: [[TMP186:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS49]], i32 0, i32 0
12498 // CHECK11-NEXT: store i8* null, i8** [[TMP186]], align 4
12499 // CHECK11-NEXT: [[TMP187:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 1
12500 // CHECK11-NEXT: [[TMP188:%.*]] = bitcast i8** [[TMP187]] to i32*
12501 // CHECK11-NEXT: store i32 [[TMP178]], i32* [[TMP188]], align 4
12502 // CHECK11-NEXT: [[TMP189:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 1
12503 // CHECK11-NEXT: [[TMP190:%.*]] = bitcast i8** [[TMP189]] to i32*
12504 // CHECK11-NEXT: store i32 [[TMP178]], i32* [[TMP190]], align 4
12505 // CHECK11-NEXT: [[TMP191:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS49]], i32 0, i32 1
12506 // CHECK11-NEXT: store i8* null, i8** [[TMP191]], align 4
12507 // CHECK11-NEXT: [[TMP192:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 2
12508 // CHECK11-NEXT: [[TMP193:%.*]] = bitcast i8** [[TMP192]] to i32**
12509 // CHECK11-NEXT: store i32* [[TMP179]], i32** [[TMP193]], align 4
12510 // CHECK11-NEXT: [[TMP194:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 2
12511 // CHECK11-NEXT: [[TMP195:%.*]] = bitcast i8** [[TMP194]] to i32**
12512 // CHECK11-NEXT: store i32* [[TMP179]], i32** [[TMP195]], align 4
12513 // CHECK11-NEXT: [[TMP196:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS49]], i32 0, i32 2
12514 // CHECK11-NEXT: store i8* null, i8** [[TMP196]], align 4
12515 // CHECK11-NEXT: [[TMP197:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 3
12516 // CHECK11-NEXT: [[TMP198:%.*]] = bitcast i8** [[TMP197]] to i32**
12517 // CHECK11-NEXT: store i32* [[TMP180]], i32** [[TMP198]], align 4
12518 // CHECK11-NEXT: [[TMP199:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 3
12519 // CHECK11-NEXT: [[TMP200:%.*]] = bitcast i8** [[TMP199]] to i32**
12520 // CHECK11-NEXT: store i32* [[TMP180]], i32** [[TMP200]], align 4
12521 // CHECK11-NEXT: [[TMP201:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS49]], i32 0, i32 3
12522 // CHECK11-NEXT: store i8* null, i8** [[TMP201]], align 4
12523 // CHECK11-NEXT: [[TMP202:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 4
12524 // CHECK11-NEXT: [[TMP203:%.*]] = bitcast i8** [[TMP202]] to i32**
12525 // CHECK11-NEXT: store i32* [[TMP181]], i32** [[TMP203]], align 4
12526 // CHECK11-NEXT: [[TMP204:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 4
12527 // CHECK11-NEXT: [[TMP205:%.*]] = bitcast i8** [[TMP204]] to i32**
12528 // CHECK11-NEXT: store i32* [[TMP181]], i32** [[TMP205]], align 4
12529 // CHECK11-NEXT: [[TMP206:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS49]], i32 0, i32 4
12530 // CHECK11-NEXT: store i8* null, i8** [[TMP206]], align 4
12531 // CHECK11-NEXT: [[TMP207:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 0
12532 // CHECK11-NEXT: [[TMP208:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 0
12533 // CHECK11-NEXT: [[TMP209:%.*]] = load i32, i32* [[N]], align 4
12534 // CHECK11-NEXT: store i32 [[TMP209]], i32* [[DOTCAPTURE_EXPR_51]], align 4
12535 // CHECK11-NEXT: [[TMP210:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_51]], align 4
12536 // CHECK11-NEXT: [[SUB53:%.*]] = sub nsw i32 [[TMP210]], 0
12537 // CHECK11-NEXT: [[DIV54:%.*]] = sdiv i32 [[SUB53]], 1
12538 // CHECK11-NEXT: [[SUB55:%.*]] = sub nsw i32 [[DIV54]], 1
12539 // CHECK11-NEXT: store i32 [[SUB55]], i32* [[DOTCAPTURE_EXPR_52]], align 4
12540 // CHECK11-NEXT: [[TMP211:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_52]], align 4
12541 // CHECK11-NEXT: [[ADD56:%.*]] = add nsw i32 [[TMP211]], 1
12542 // CHECK11-NEXT: [[TMP212:%.*]] = zext i32 [[ADD56]] to i64
12543 // CHECK11-NEXT: [[KERNEL_ARGS57:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
12544 // CHECK11-NEXT: [[TMP213:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS57]], i32 0, i32 0
12545 // CHECK11-NEXT: store i32 1, i32* [[TMP213]], align 4
12546 // CHECK11-NEXT: [[TMP214:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS57]], i32 0, i32 1
12547 // CHECK11-NEXT: store i32 5, i32* [[TMP214]], align 4
12548 // CHECK11-NEXT: [[TMP215:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS57]], i32 0, i32 2
12549 // CHECK11-NEXT: store i8** [[TMP207]], i8*** [[TMP215]], align 4
12550 // CHECK11-NEXT: [[TMP216:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS57]], i32 0, i32 3
12551 // CHECK11-NEXT: store i8** [[TMP208]], i8*** [[TMP216]], align 4
12552 // CHECK11-NEXT: [[TMP217:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS57]], i32 0, i32 4
12553 // CHECK11-NEXT: store i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.44, i32 0, i32 0), i64** [[TMP217]], align 4
12554 // CHECK11-NEXT: [[TMP218:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS57]], i32 0, i32 5
12555 // CHECK11-NEXT: store i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.45, i32 0, i32 0), i64** [[TMP218]], align 4
12556 // CHECK11-NEXT: [[TMP219:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS57]], i32 0, i32 6
12557 // CHECK11-NEXT: store i8** null, i8*** [[TMP219]], align 4
12558 // CHECK11-NEXT: [[TMP220:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS57]], i32 0, i32 7
12559 // CHECK11-NEXT: store i8** null, i8*** [[TMP220]], align 4
12560 // CHECK11-NEXT: [[TMP221:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS57]], i32 0, i32 8
12561 // CHECK11-NEXT: store i64 [[TMP212]], i64* [[TMP221]], align 8
12562 // CHECK11-NEXT: [[TMP222:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB3]], i64 -1, i32 0, i32 1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l74.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS57]])
12563 // CHECK11-NEXT: [[TMP223:%.*]] = icmp ne i32 [[TMP222]], 0
12564 // CHECK11-NEXT: br i1 [[TMP223]], label [[OMP_OFFLOAD_FAILED58:%.*]], label [[OMP_OFFLOAD_CONT59:%.*]]
12565 // CHECK11: omp_offload.failed58:
12566 // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l74(i32 [[TMP176]], i32 [[TMP178]], i32* [[TMP179]], i32* [[TMP180]], i32* [[TMP181]]) #[[ATTR2]]
12567 // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT59]]
12568 // CHECK11: omp_offload.cont59:
12569 // CHECK11-NEXT: [[TMP224:%.*]] = load i32, i32* [[N]], align 4
12570 // CHECK11-NEXT: store i32 [[TMP224]], i32* [[N_CASTED60]], align 4
12571 // CHECK11-NEXT: [[TMP225:%.*]] = load i32, i32* [[N_CASTED60]], align 4
12572 // CHECK11-NEXT: [[TMP226:%.*]] = load i32*, i32** [[A]], align 4
12573 // CHECK11-NEXT: [[TMP227:%.*]] = load i32*, i32** [[B]], align 4
12574 // CHECK11-NEXT: [[TMP228:%.*]] = load i32*, i32** [[C]], align 4
12575 // CHECK11-NEXT: [[TMP229:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS61]], i32 0, i32 0
12576 // CHECK11-NEXT: [[TMP230:%.*]] = bitcast i8** [[TMP229]] to i32*
12577 // CHECK11-NEXT: store i32 [[TMP225]], i32* [[TMP230]], align 4
12578 // CHECK11-NEXT: [[TMP231:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS62]], i32 0, i32 0
12579 // CHECK11-NEXT: [[TMP232:%.*]] = bitcast i8** [[TMP231]] to i32*
12580 // CHECK11-NEXT: store i32 [[TMP225]], i32* [[TMP232]], align 4
12581 // CHECK11-NEXT: [[TMP233:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS63]], i32 0, i32 0
12582 // CHECK11-NEXT: store i8* null, i8** [[TMP233]], align 4
12583 // CHECK11-NEXT: [[TMP234:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS61]], i32 0, i32 1
12584 // CHECK11-NEXT: [[TMP235:%.*]] = bitcast i8** [[TMP234]] to i32**
12585 // CHECK11-NEXT: store i32* [[TMP226]], i32** [[TMP235]], align 4
12586 // CHECK11-NEXT: [[TMP236:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS62]], i32 0, i32 1
12587 // CHECK11-NEXT: [[TMP237:%.*]] = bitcast i8** [[TMP236]] to i32**
12588 // CHECK11-NEXT: store i32* [[TMP226]], i32** [[TMP237]], align 4
12589 // CHECK11-NEXT: [[TMP238:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS63]], i32 0, i32 1
12590 // CHECK11-NEXT: store i8* null, i8** [[TMP238]], align 4
12591 // CHECK11-NEXT: [[TMP239:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS61]], i32 0, i32 2
12592 // CHECK11-NEXT: [[TMP240:%.*]] = bitcast i8** [[TMP239]] to i32**
12593 // CHECK11-NEXT: store i32* [[TMP227]], i32** [[TMP240]], align 4
12594 // CHECK11-NEXT: [[TMP241:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS62]], i32 0, i32 2
12595 // CHECK11-NEXT: [[TMP242:%.*]] = bitcast i8** [[TMP241]] to i32**
12596 // CHECK11-NEXT: store i32* [[TMP227]], i32** [[TMP242]], align 4
12597 // CHECK11-NEXT: [[TMP243:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS63]], i32 0, i32 2
12598 // CHECK11-NEXT: store i8* null, i8** [[TMP243]], align 4
12599 // CHECK11-NEXT: [[TMP244:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS61]], i32 0, i32 3
12600 // CHECK11-NEXT: [[TMP245:%.*]] = bitcast i8** [[TMP244]] to i32**
12601 // CHECK11-NEXT: store i32* [[TMP228]], i32** [[TMP245]], align 4
12602 // CHECK11-NEXT: [[TMP246:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS62]], i32 0, i32 3
12603 // CHECK11-NEXT: [[TMP247:%.*]] = bitcast i8** [[TMP246]] to i32**
12604 // CHECK11-NEXT: store i32* [[TMP228]], i32** [[TMP247]], align 4
12605 // CHECK11-NEXT: [[TMP248:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS63]], i32 0, i32 3
12606 // CHECK11-NEXT: store i8* null, i8** [[TMP248]], align 4
12607 // CHECK11-NEXT: [[TMP249:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS61]], i32 0, i32 0
12608 // CHECK11-NEXT: [[TMP250:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS62]], i32 0, i32 0
12609 // CHECK11-NEXT: [[TMP251:%.*]] = load i32, i32* [[N]], align 4
12610 // CHECK11-NEXT: store i32 [[TMP251]], i32* [[DOTCAPTURE_EXPR_65]], align 4
12611 // CHECK11-NEXT: [[TMP252:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_65]], align 4
12612 // CHECK11-NEXT: [[SUB67:%.*]] = sub nsw i32 [[TMP252]], 0
12613 // CHECK11-NEXT: [[DIV68:%.*]] = sdiv i32 [[SUB67]], 1
12614 // CHECK11-NEXT: [[SUB69:%.*]] = sub nsw i32 [[DIV68]], 1
12615 // CHECK11-NEXT: store i32 [[SUB69]], i32* [[DOTCAPTURE_EXPR_66]], align 4
12616 // CHECK11-NEXT: [[TMP253:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_66]], align 4
12617 // CHECK11-NEXT: [[ADD70:%.*]] = add nsw i32 [[TMP253]], 1
12618 // CHECK11-NEXT: [[TMP254:%.*]] = zext i32 [[ADD70]] to i64
12619 // CHECK11-NEXT: [[KERNEL_ARGS71:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
12620 // CHECK11-NEXT: [[TMP255:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS71]], i32 0, i32 0
12621 // CHECK11-NEXT: store i32 1, i32* [[TMP255]], align 4
12622 // CHECK11-NEXT: [[TMP256:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS71]], i32 0, i32 1
12623 // CHECK11-NEXT: store i32 4, i32* [[TMP256]], align 4
12624 // CHECK11-NEXT: [[TMP257:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS71]], i32 0, i32 2
12625 // CHECK11-NEXT: store i8** [[TMP249]], i8*** [[TMP257]], align 4
12626 // CHECK11-NEXT: [[TMP258:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS71]], i32 0, i32 3
12627 // CHECK11-NEXT: store i8** [[TMP250]], i8*** [[TMP258]], align 4
12628 // CHECK11-NEXT: [[TMP259:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS71]], i32 0, i32 4
12629 // CHECK11-NEXT: store i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.48, i32 0, i32 0), i64** [[TMP259]], align 4
12630 // CHECK11-NEXT: [[TMP260:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS71]], i32 0, i32 5
12631 // CHECK11-NEXT: store i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.49, i32 0, i32 0), i64** [[TMP260]], align 4
12632 // CHECK11-NEXT: [[TMP261:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS71]], i32 0, i32 6
12633 // CHECK11-NEXT: store i8** null, i8*** [[TMP261]], align 4
12634 // CHECK11-NEXT: [[TMP262:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS71]], i32 0, i32 7
12635 // CHECK11-NEXT: store i8** null, i8*** [[TMP262]], align 4
12636 // CHECK11-NEXT: [[TMP263:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS71]], i32 0, i32 8
12637 // CHECK11-NEXT: store i64 [[TMP254]], i64* [[TMP263]], align 8
12638 // CHECK11-NEXT: [[TMP264:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB3]], i64 -1, i32 0, i32 1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l82.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS71]])
12639 // CHECK11-NEXT: [[TMP265:%.*]] = icmp ne i32 [[TMP264]], 0
12640 // CHECK11-NEXT: br i1 [[TMP265]], label [[OMP_OFFLOAD_FAILED72:%.*]], label [[OMP_OFFLOAD_CONT73:%.*]]
12641 // CHECK11: omp_offload.failed72:
12642 // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l82(i32 [[TMP225]], i32* [[TMP226]], i32* [[TMP227]], i32* [[TMP228]]) #[[ATTR2]]
12643 // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT73]]
12644 // CHECK11: omp_offload.cont73:
12645 // CHECK11-NEXT: [[TMP266:%.*]] = load i32, i32* [[CH]], align 4
12646 // CHECK11-NEXT: store i32 [[TMP266]], i32* [[CH_CASTED74]], align 4
12647 // CHECK11-NEXT: [[TMP267:%.*]] = load i32, i32* [[CH_CASTED74]], align 4
12648 // CHECK11-NEXT: [[TMP268:%.*]] = load i32, i32* [[N]], align 4
12649 // CHECK11-NEXT: store i32 [[TMP268]], i32* [[N_CASTED75]], align 4
12650 // CHECK11-NEXT: [[TMP269:%.*]] = load i32, i32* [[N_CASTED75]], align 4
12651 // CHECK11-NEXT: [[TMP270:%.*]] = load i32*, i32** [[A]], align 4
12652 // CHECK11-NEXT: [[TMP271:%.*]] = load i32*, i32** [[B]], align 4
12653 // CHECK11-NEXT: [[TMP272:%.*]] = load i32*, i32** [[C]], align 4
12654 // CHECK11-NEXT: [[TMP273:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS76]], i32 0, i32 0
12655 // CHECK11-NEXT: [[TMP274:%.*]] = bitcast i8** [[TMP273]] to i32*
12656 // CHECK11-NEXT: store i32 [[TMP267]], i32* [[TMP274]], align 4
12657 // CHECK11-NEXT: [[TMP275:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS77]], i32 0, i32 0
12658 // CHECK11-NEXT: [[TMP276:%.*]] = bitcast i8** [[TMP275]] to i32*
12659 // CHECK11-NEXT: store i32 [[TMP267]], i32* [[TMP276]], align 4
12660 // CHECK11-NEXT: [[TMP277:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS78]], i32 0, i32 0
12661 // CHECK11-NEXT: store i8* null, i8** [[TMP277]], align 4
12662 // CHECK11-NEXT: [[TMP278:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS76]], i32 0, i32 1
12663 // CHECK11-NEXT: [[TMP279:%.*]] = bitcast i8** [[TMP278]] to i32*
12664 // CHECK11-NEXT: store i32 [[TMP269]], i32* [[TMP279]], align 4
12665 // CHECK11-NEXT: [[TMP280:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS77]], i32 0, i32 1
12666 // CHECK11-NEXT: [[TMP281:%.*]] = bitcast i8** [[TMP280]] to i32*
12667 // CHECK11-NEXT: store i32 [[TMP269]], i32* [[TMP281]], align 4
12668 // CHECK11-NEXT: [[TMP282:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS78]], i32 0, i32 1
12669 // CHECK11-NEXT: store i8* null, i8** [[TMP282]], align 4
12670 // CHECK11-NEXT: [[TMP283:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS76]], i32 0, i32 2
12671 // CHECK11-NEXT: [[TMP284:%.*]] = bitcast i8** [[TMP283]] to i32**
12672 // CHECK11-NEXT: store i32* [[TMP270]], i32** [[TMP284]], align 4
12673 // CHECK11-NEXT: [[TMP285:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS77]], i32 0, i32 2
12674 // CHECK11-NEXT: [[TMP286:%.*]] = bitcast i8** [[TMP285]] to i32**
12675 // CHECK11-NEXT: store i32* [[TMP270]], i32** [[TMP286]], align 4
12676 // CHECK11-NEXT: [[TMP287:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS78]], i32 0, i32 2
12677 // CHECK11-NEXT: store i8* null, i8** [[TMP287]], align 4
12678 // CHECK11-NEXT: [[TMP288:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS76]], i32 0, i32 3
12679 // CHECK11-NEXT: [[TMP289:%.*]] = bitcast i8** [[TMP288]] to i32**
12680 // CHECK11-NEXT: store i32* [[TMP271]], i32** [[TMP289]], align 4
12681 // CHECK11-NEXT: [[TMP290:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS77]], i32 0, i32 3
12682 // CHECK11-NEXT: [[TMP291:%.*]] = bitcast i8** [[TMP290]] to i32**
12683 // CHECK11-NEXT: store i32* [[TMP271]], i32** [[TMP291]], align 4
12684 // CHECK11-NEXT: [[TMP292:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS78]], i32 0, i32 3
12685 // CHECK11-NEXT: store i8* null, i8** [[TMP292]], align 4
12686 // CHECK11-NEXT: [[TMP293:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS76]], i32 0, i32 4
12687 // CHECK11-NEXT: [[TMP294:%.*]] = bitcast i8** [[TMP293]] to i32**
12688 // CHECK11-NEXT: store i32* [[TMP272]], i32** [[TMP294]], align 4
12689 // CHECK11-NEXT: [[TMP295:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS77]], i32 0, i32 4
12690 // CHECK11-NEXT: [[TMP296:%.*]] = bitcast i8** [[TMP295]] to i32**
12691 // CHECK11-NEXT: store i32* [[TMP272]], i32** [[TMP296]], align 4
12692 // CHECK11-NEXT: [[TMP297:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS78]], i32 0, i32 4
12693 // CHECK11-NEXT: store i8* null, i8** [[TMP297]], align 4
12694 // CHECK11-NEXT: [[TMP298:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS76]], i32 0, i32 0
12695 // CHECK11-NEXT: [[TMP299:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS77]], i32 0, i32 0
12696 // CHECK11-NEXT: [[TMP300:%.*]] = load i32, i32* [[N]], align 4
12697 // CHECK11-NEXT: store i32 [[TMP300]], i32* [[DOTCAPTURE_EXPR_80]], align 4
12698 // CHECK11-NEXT: [[TMP301:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_80]], align 4
12699 // CHECK11-NEXT: [[SUB82:%.*]] = sub nsw i32 [[TMP301]], 0
12700 // CHECK11-NEXT: [[DIV83:%.*]] = sdiv i32 [[SUB82]], 1
12701 // CHECK11-NEXT: [[SUB84:%.*]] = sub nsw i32 [[DIV83]], 1
12702 // CHECK11-NEXT: store i32 [[SUB84]], i32* [[DOTCAPTURE_EXPR_81]], align 4
12703 // CHECK11-NEXT: [[TMP302:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_81]], align 4
12704 // CHECK11-NEXT: [[ADD85:%.*]] = add nsw i32 [[TMP302]], 1
12705 // CHECK11-NEXT: [[TMP303:%.*]] = zext i32 [[ADD85]] to i64
12706 // CHECK11-NEXT: [[KERNEL_ARGS86:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
12707 // CHECK11-NEXT: [[TMP304:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS86]], i32 0, i32 0
12708 // CHECK11-NEXT: store i32 1, i32* [[TMP304]], align 4
12709 // CHECK11-NEXT: [[TMP305:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS86]], i32 0, i32 1
12710 // CHECK11-NEXT: store i32 5, i32* [[TMP305]], align 4
12711 // CHECK11-NEXT: [[TMP306:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS86]], i32 0, i32 2
12712 // CHECK11-NEXT: store i8** [[TMP298]], i8*** [[TMP306]], align 4
12713 // CHECK11-NEXT: [[TMP307:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS86]], i32 0, i32 3
12714 // CHECK11-NEXT: store i8** [[TMP299]], i8*** [[TMP307]], align 4
12715 // CHECK11-NEXT: [[TMP308:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS86]], i32 0, i32 4
12716 // CHECK11-NEXT: store i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.52, i32 0, i32 0), i64** [[TMP308]], align 4
12717 // CHECK11-NEXT: [[TMP309:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS86]], i32 0, i32 5
12718 // CHECK11-NEXT: store i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.53, i32 0, i32 0), i64** [[TMP309]], align 4
12719 // CHECK11-NEXT: [[TMP310:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS86]], i32 0, i32 6
12720 // CHECK11-NEXT: store i8** null, i8*** [[TMP310]], align 4
12721 // CHECK11-NEXT: [[TMP311:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS86]], i32 0, i32 7
12722 // CHECK11-NEXT: store i8** null, i8*** [[TMP311]], align 4
12723 // CHECK11-NEXT: [[TMP312:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS86]], i32 0, i32 8
12724 // CHECK11-NEXT: store i64 [[TMP303]], i64* [[TMP312]], align 8
12725 // CHECK11-NEXT: [[TMP313:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB3]], i64 -1, i32 0, i32 1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l90.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS86]])
12726 // CHECK11-NEXT: [[TMP314:%.*]] = icmp ne i32 [[TMP313]], 0
12727 // CHECK11-NEXT: br i1 [[TMP314]], label [[OMP_OFFLOAD_FAILED87:%.*]], label [[OMP_OFFLOAD_CONT88:%.*]]
12728 // CHECK11: omp_offload.failed87:
12729 // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l90(i32 [[TMP267]], i32 [[TMP269]], i32* [[TMP270]], i32* [[TMP271]], i32* [[TMP272]]) #[[ATTR2]]
12730 // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT88]]
12731 // CHECK11: omp_offload.cont88:
12732 // CHECK11-NEXT: ret i32 0
12733 //
12734 //
12735 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l42
12736 // CHECK11-SAME: (i32 noundef [[N:%.*]], i32* noundef [[A:%.*]], i32* noundef [[B:%.*]], i32* noundef [[C:%.*]]) #[[ATTR1]] {
12737 // CHECK11-NEXT: entry:
12738 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
12739 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
12740 // CHECK11-NEXT: [[B_ADDR:%.*]] = alloca i32*, align 4
12741 // CHECK11-NEXT: [[C_ADDR:%.*]] = alloca i32*, align 4
12742 // CHECK11-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
12743 // CHECK11-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
12744 // CHECK11-NEXT: store i32* [[B]], i32** [[B_ADDR]], align 4
12745 // CHECK11-NEXT: store i32* [[C]], i32** [[C_ADDR]], align 4
12746 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
12747 // CHECK11-NEXT: ret void
12748 //
12749 //
12750 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..26
12751 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
12752 // CHECK11-NEXT: entry:
12753 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
12754 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
12755 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 4
12756 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32**, align 4
12757 // CHECK11-NEXT: [[B_ADDR:%.*]] = alloca i32**, align 4
12758 // CHECK11-NEXT: [[C_ADDR:%.*]] = alloca i32**, align 4
12759 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
12760 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
12761 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
12762 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
12763 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
12764 // CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
12765 // CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
12766 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12767 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12768 // CHECK11-NEXT: [[I3:%.*]] = alloca i32, align 4
12769 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
12770 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
12771 // CHECK11-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 4
12772 // CHECK11-NEXT: store i32** [[A]], i32*** [[A_ADDR]], align 4
12773 // CHECK11-NEXT: store i32** [[B]], i32*** [[B_ADDR]], align 4
12774 // CHECK11-NEXT: store i32** [[C]], i32*** [[C_ADDR]], align 4
12775 // CHECK11-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
12776 // CHECK11-NEXT: [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
12777 // CHECK11-NEXT: [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
12778 // CHECK11-NEXT: [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
12779 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
12780 // CHECK11-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
12781 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12782 // CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
12783 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
12784 // CHECK11-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
12785 // CHECK11-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
12786 // CHECK11-NEXT: store i32 0, i32* [[I]], align 4
12787 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12788 // CHECK11-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
12789 // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
12790 // CHECK11: omp.precond.then:
12791 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
12792 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12793 // CHECK11-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
12794 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
12795 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
12796 // CHECK11-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
12797 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
12798 // CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
12799 // CHECK11-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
12800 // CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12801 // CHECK11-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
12802 // CHECK11-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
12803 // CHECK11: cond.true:
12804 // CHECK11-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12805 // CHECK11-NEXT: br label [[COND_END:%.*]]
12806 // CHECK11: cond.false:
12807 // CHECK11-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
12808 // CHECK11-NEXT: br label [[COND_END]]
12809 // CHECK11: cond.end:
12810 // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
12811 // CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
12812 // CHECK11-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
12813 // CHECK11-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
12814 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
12815 // CHECK11: omp.inner.for.cond:
12816 // CHECK11-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !63
12817 // CHECK11-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !63
12818 // CHECK11-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
12819 // CHECK11-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12820 // CHECK11: omp.inner.for.body:
12821 // CHECK11-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !63
12822 // CHECK11-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !63
12823 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32**, i32**, i32**)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], i32** [[TMP1]], i32** [[TMP2]], i32** [[TMP3]]), !llvm.access.group !63
12824 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
12825 // CHECK11: omp.inner.for.inc:
12826 // CHECK11-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !63
12827 // CHECK11-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !63
12828 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
12829 // CHECK11-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !63
12830 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP64:![0-9]+]]
12831 // CHECK11: omp.inner.for.end:
12832 // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
12833 // CHECK11: omp.loop.exit:
12834 // CHECK11-NEXT: [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
12835 // CHECK11-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
12836 // CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
12837 // CHECK11-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
12838 // CHECK11-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
12839 // CHECK11-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
12840 // CHECK11: .omp.final.then:
12841 // CHECK11-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12842 // CHECK11-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
12843 // CHECK11-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
12844 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
12845 // CHECK11-NEXT: [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
12846 // CHECK11-NEXT: store i32 [[ADD8]], i32* [[I3]], align 4
12847 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]]
12848 // CHECK11: .omp.final.done:
12849 // CHECK11-NEXT: br label [[OMP_PRECOND_END]]
12850 // CHECK11: omp.precond.end:
12851 // CHECK11-NEXT: ret void
12852 //
12853 //
12854 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..27
12855 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
12856 // CHECK11-NEXT: entry:
12857 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
12858 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
12859 // CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
12860 // CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
12861 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 4
12862 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32**, align 4
12863 // CHECK11-NEXT: [[B_ADDR:%.*]] = alloca i32**, align 4
12864 // CHECK11-NEXT: [[C_ADDR:%.*]] = alloca i32**, align 4
12865 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
12866 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
12867 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
12868 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
12869 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
12870 // CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
12871 // CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
12872 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12873 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12874 // CHECK11-NEXT: [[I3:%.*]] = alloca i32, align 4
12875 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
12876 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
12877 // CHECK11-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
12878 // CHECK11-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
12879 // CHECK11-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 4
12880 // CHECK11-NEXT: store i32** [[A]], i32*** [[A_ADDR]], align 4
12881 // CHECK11-NEXT: store i32** [[B]], i32*** [[B_ADDR]], align 4
12882 // CHECK11-NEXT: store i32** [[C]], i32*** [[C_ADDR]], align 4
12883 // CHECK11-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
12884 // CHECK11-NEXT: [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
12885 // CHECK11-NEXT: [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
12886 // CHECK11-NEXT: [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
12887 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
12888 // CHECK11-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
12889 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12890 // CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
12891 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
12892 // CHECK11-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
12893 // CHECK11-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
12894 // CHECK11-NEXT: store i32 0, i32* [[I]], align 4
12895 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12896 // CHECK11-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
12897 // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
12898 // CHECK11: omp.precond.then:
12899 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
12900 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12901 // CHECK11-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
12902 // CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
12903 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
12904 // CHECK11-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
12905 // CHECK11-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
12906 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
12907 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
12908 // CHECK11-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
12909 // CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
12910 // CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
12911 // CHECK11-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12912 // CHECK11-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12913 // CHECK11-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
12914 // CHECK11-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
12915 // CHECK11: cond.true:
12916 // CHECK11-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12917 // CHECK11-NEXT: br label [[COND_END:%.*]]
12918 // CHECK11: cond.false:
12919 // CHECK11-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12920 // CHECK11-NEXT: br label [[COND_END]]
12921 // CHECK11: cond.end:
12922 // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
12923 // CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
12924 // CHECK11-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
12925 // CHECK11-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
12926 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
12927 // CHECK11: omp.inner.for.cond:
12928 // CHECK11-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !66
12929 // CHECK11-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !66
12930 // CHECK11-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
12931 // CHECK11-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12932 // CHECK11: omp.inner.for.body:
12933 // CHECK11-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !66
12934 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
12935 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
12936 // CHECK11-NEXT: store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !66
12937 // CHECK11-NEXT: [[TMP20:%.*]] = load i32*, i32** [[TMP2]], align 4, !llvm.access.group !66
12938 // CHECK11-NEXT: [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !66
12939 // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i32 [[TMP21]]
12940 // CHECK11-NEXT: [[TMP22:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !66
12941 // CHECK11-NEXT: [[TMP23:%.*]] = load i32*, i32** [[TMP3]], align 4, !llvm.access.group !66
12942 // CHECK11-NEXT: [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !66
12943 // CHECK11-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i32 [[TMP24]]
12944 // CHECK11-NEXT: [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4, !llvm.access.group !66
12945 // CHECK11-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP22]], [[TMP25]]
12946 // CHECK11-NEXT: [[TMP26:%.*]] = load i32*, i32** [[TMP1]], align 4, !llvm.access.group !66
12947 // CHECK11-NEXT: [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !66
12948 // CHECK11-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i32 [[TMP27]]
12949 // CHECK11-NEXT: store i32 [[ADD7]], i32* [[ARRAYIDX8]], align 4, !llvm.access.group !66
12950 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
12951 // CHECK11: omp.body.continue:
12952 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
12953 // CHECK11: omp.inner.for.inc:
12954 // CHECK11-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !66
12955 // CHECK11-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP28]], 1
12956 // CHECK11-NEXT: store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !66
12957 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP67:![0-9]+]]
12958 // CHECK11: omp.inner.for.end:
12959 // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
12960 // CHECK11: omp.loop.exit:
12961 // CHECK11-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
12962 // CHECK11-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
12963 // CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
12964 // CHECK11-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
12965 // CHECK11-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
12966 // CHECK11-NEXT: br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
12967 // CHECK11: .omp.final.then:
12968 // CHECK11-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12969 // CHECK11-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
12970 // CHECK11-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
12971 // CHECK11-NEXT: [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
12972 // CHECK11-NEXT: [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
12973 // CHECK11-NEXT: store i32 [[ADD13]], i32* [[I3]], align 4
12974 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]]
12975 // CHECK11: .omp.final.done:
12976 // CHECK11-NEXT: br label [[OMP_PRECOND_END]]
12977 // CHECK11: omp.precond.end:
12978 // CHECK11-NEXT: ret void
12979 //
12980 //
12981 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l50
12982 // CHECK11-SAME: (i32 noundef [[N:%.*]], i32* noundef [[A:%.*]], i32* noundef [[B:%.*]], i32* noundef [[C:%.*]]) #[[ATTR1]] {
12983 // CHECK11-NEXT: entry:
12984 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
12985 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
12986 // CHECK11-NEXT: [[B_ADDR:%.*]] = alloca i32*, align 4
12987 // CHECK11-NEXT: [[C_ADDR:%.*]] = alloca i32*, align 4
12988 // CHECK11-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
12989 // CHECK11-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
12990 // CHECK11-NEXT: store i32* [[B]], i32** [[B_ADDR]], align 4
12991 // CHECK11-NEXT: store i32* [[C]], i32** [[C_ADDR]], align 4
12992 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
12993 // CHECK11-NEXT: ret void
12994 //
12995 //
12996 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..30
12997 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
12998 // CHECK11-NEXT: entry:
12999 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
13000 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
13001 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 4
13002 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32**, align 4
13003 // CHECK11-NEXT: [[B_ADDR:%.*]] = alloca i32**, align 4
13004 // CHECK11-NEXT: [[C_ADDR:%.*]] = alloca i32**, align 4
13005 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
13006 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
13007 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
13008 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
13009 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
13010 // CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
13011 // CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
13012 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13013 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13014 // CHECK11-NEXT: [[I3:%.*]] = alloca i32, align 4
13015 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
13016 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
13017 // CHECK11-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 4
13018 // CHECK11-NEXT: store i32** [[A]], i32*** [[A_ADDR]], align 4
13019 // CHECK11-NEXT: store i32** [[B]], i32*** [[B_ADDR]], align 4
13020 // CHECK11-NEXT: store i32** [[C]], i32*** [[C_ADDR]], align 4
13021 // CHECK11-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
13022 // CHECK11-NEXT: [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
13023 // CHECK11-NEXT: [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
13024 // CHECK11-NEXT: [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
13025 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
13026 // CHECK11-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
13027 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
13028 // CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
13029 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
13030 // CHECK11-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
13031 // CHECK11-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
13032 // CHECK11-NEXT: store i32 0, i32* [[I]], align 4
13033 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
13034 // CHECK11-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
13035 // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
13036 // CHECK11: omp.precond.then:
13037 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
13038 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13039 // CHECK11-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
13040 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
13041 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
13042 // CHECK11-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
13043 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
13044 // CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
13045 // CHECK11-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
13046 // CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13047 // CHECK11-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
13048 // CHECK11-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
13049 // CHECK11: cond.true:
13050 // CHECK11-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13051 // CHECK11-NEXT: br label [[COND_END:%.*]]
13052 // CHECK11: cond.false:
13053 // CHECK11-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
13054 // CHECK11-NEXT: br label [[COND_END]]
13055 // CHECK11: cond.end:
13056 // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
13057 // CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
13058 // CHECK11-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
13059 // CHECK11-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
13060 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
13061 // CHECK11: omp.inner.for.cond:
13062 // CHECK11-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !69
13063 // CHECK11-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !69
13064 // CHECK11-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
13065 // CHECK11-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13066 // CHECK11: omp.inner.for.body:
13067 // CHECK11-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !69
13068 // CHECK11-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !69
13069 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32**, i32**, i32**)* @.omp_outlined..31 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], i32** [[TMP1]], i32** [[TMP2]], i32** [[TMP3]]), !llvm.access.group !69
13070 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
13071 // CHECK11: omp.inner.for.inc:
13072 // CHECK11-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !69
13073 // CHECK11-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !69
13074 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
13075 // CHECK11-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !69
13076 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP70:![0-9]+]]
13077 // CHECK11: omp.inner.for.end:
13078 // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
13079 // CHECK11: omp.loop.exit:
13080 // CHECK11-NEXT: [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
13081 // CHECK11-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
13082 // CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
13083 // CHECK11-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
13084 // CHECK11-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
13085 // CHECK11-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
13086 // CHECK11: .omp.final.then:
13087 // CHECK11-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
13088 // CHECK11-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
13089 // CHECK11-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
13090 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
13091 // CHECK11-NEXT: [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
13092 // CHECK11-NEXT: store i32 [[ADD8]], i32* [[I3]], align 4
13093 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]]
13094 // CHECK11: .omp.final.done:
13095 // CHECK11-NEXT: br label [[OMP_PRECOND_END]]
13096 // CHECK11: omp.precond.end:
13097 // CHECK11-NEXT: ret void
13098 //
13099 //
13100 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..31
13101 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
13102 // CHECK11-NEXT: entry:
13103 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
13104 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
13105 // CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
13106 // CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
13107 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 4
13108 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32**, align 4
13109 // CHECK11-NEXT: [[B_ADDR:%.*]] = alloca i32**, align 4
13110 // CHECK11-NEXT: [[C_ADDR:%.*]] = alloca i32**, align 4
13111 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
13112 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
13113 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
13114 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
13115 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
13116 // CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
13117 // CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
13118 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13119 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13120 // CHECK11-NEXT: [[I3:%.*]] = alloca i32, align 4
13121 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
13122 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
13123 // CHECK11-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
13124 // CHECK11-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
13125 // CHECK11-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 4
13126 // CHECK11-NEXT: store i32** [[A]], i32*** [[A_ADDR]], align 4
13127 // CHECK11-NEXT: store i32** [[B]], i32*** [[B_ADDR]], align 4
13128 // CHECK11-NEXT: store i32** [[C]], i32*** [[C_ADDR]], align 4
13129 // CHECK11-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
13130 // CHECK11-NEXT: [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
13131 // CHECK11-NEXT: [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
13132 // CHECK11-NEXT: [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
13133 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
13134 // CHECK11-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
13135 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
13136 // CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
13137 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
13138 // CHECK11-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
13139 // CHECK11-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
13140 // CHECK11-NEXT: store i32 0, i32* [[I]], align 4
13141 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
13142 // CHECK11-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
13143 // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
13144 // CHECK11: omp.precond.then:
13145 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
13146 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13147 // CHECK11-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
13148 // CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
13149 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
13150 // CHECK11-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
13151 // CHECK11-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
13152 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
13153 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
13154 // CHECK11-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
13155 // CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
13156 // CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
13157 // CHECK11-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
13158 // CHECK11-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13159 // CHECK11-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
13160 // CHECK11-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
13161 // CHECK11: cond.true:
13162 // CHECK11-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13163 // CHECK11-NEXT: br label [[COND_END:%.*]]
13164 // CHECK11: cond.false:
13165 // CHECK11-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
13166 // CHECK11-NEXT: br label [[COND_END]]
13167 // CHECK11: cond.end:
13168 // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
13169 // CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
13170 // CHECK11-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
13171 // CHECK11-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
13172 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
13173 // CHECK11: omp.inner.for.cond:
13174 // CHECK11-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !72
13175 // CHECK11-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !72
13176 // CHECK11-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
13177 // CHECK11-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13178 // CHECK11: omp.inner.for.body:
13179 // CHECK11-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !72
13180 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
13181 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
13182 // CHECK11-NEXT: store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !72
13183 // CHECK11-NEXT: [[TMP20:%.*]] = load i32*, i32** [[TMP2]], align 4, !llvm.access.group !72
13184 // CHECK11-NEXT: [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !72
13185 // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i32 [[TMP21]]
13186 // CHECK11-NEXT: [[TMP22:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !72
13187 // CHECK11-NEXT: [[TMP23:%.*]] = load i32*, i32** [[TMP3]], align 4, !llvm.access.group !72
13188 // CHECK11-NEXT: [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !72
13189 // CHECK11-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i32 [[TMP24]]
13190 // CHECK11-NEXT: [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4, !llvm.access.group !72
13191 // CHECK11-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP22]], [[TMP25]]
13192 // CHECK11-NEXT: [[TMP26:%.*]] = load i32*, i32** [[TMP1]], align 4, !llvm.access.group !72
13193 // CHECK11-NEXT: [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !72
13194 // CHECK11-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i32 [[TMP27]]
13195 // CHECK11-NEXT: store i32 [[ADD7]], i32* [[ARRAYIDX8]], align 4, !llvm.access.group !72
13196 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
13197 // CHECK11: omp.body.continue:
13198 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
13199 // CHECK11: omp.inner.for.inc:
13200 // CHECK11-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !72
13201 // CHECK11-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP28]], 1
13202 // CHECK11-NEXT: store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !72
13203 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP73:![0-9]+]]
13204 // CHECK11: omp.inner.for.end:
13205 // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
13206 // CHECK11: omp.loop.exit:
13207 // CHECK11-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
13208 // CHECK11-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
13209 // CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
13210 // CHECK11-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
13211 // CHECK11-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
13212 // CHECK11-NEXT: br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
13213 // CHECK11: .omp.final.then:
13214 // CHECK11-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
13215 // CHECK11-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
13216 // CHECK11-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
13217 // CHECK11-NEXT: [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
13218 // CHECK11-NEXT: [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
13219 // CHECK11-NEXT: store i32 [[ADD13]], i32* [[I3]], align 4
13220 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]]
13221 // CHECK11: .omp.final.done:
13222 // CHECK11-NEXT: br label [[OMP_PRECOND_END]]
13223 // CHECK11: omp.precond.end:
13224 // CHECK11-NEXT: ret void
13225 //
13226 //
13227 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l58
13228 // CHECK11-SAME: (i32 noundef [[CH:%.*]], i32 noundef [[N:%.*]], i32* noundef [[A:%.*]], i32* noundef [[B:%.*]], i32* noundef [[C:%.*]]) #[[ATTR1]] {
13229 // CHECK11-NEXT: entry:
13230 // CHECK11-NEXT: [[CH_ADDR:%.*]] = alloca i32, align 4
13231 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
13232 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
13233 // CHECK11-NEXT: [[B_ADDR:%.*]] = alloca i32*, align 4
13234 // CHECK11-NEXT: [[C_ADDR:%.*]] = alloca i32*, align 4
13235 // CHECK11-NEXT: store i32 [[CH]], i32* [[CH_ADDR]], align 4
13236 // CHECK11-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
13237 // CHECK11-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
13238 // CHECK11-NEXT: store i32* [[B]], i32** [[B_ADDR]], align 4
13239 // CHECK11-NEXT: store i32* [[C]], i32** [[C_ADDR]], align 4
13240 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..34 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
13241 // CHECK11-NEXT: ret void
13242 //
13243 //
13244 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..34
13245 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
13246 // CHECK11-NEXT: entry:
13247 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
13248 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
13249 // CHECK11-NEXT: [[CH_ADDR:%.*]] = alloca i32*, align 4
13250 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 4
13251 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32**, align 4
13252 // CHECK11-NEXT: [[B_ADDR:%.*]] = alloca i32**, align 4
13253 // CHECK11-NEXT: [[C_ADDR:%.*]] = alloca i32**, align 4
13254 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
13255 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
13256 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
13257 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
13258 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
13259 // CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
13260 // CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
13261 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13262 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13263 // CHECK11-NEXT: [[I3:%.*]] = alloca i32, align 4
13264 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
13265 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
13266 // CHECK11-NEXT: store i32* [[CH]], i32** [[CH_ADDR]], align 4
13267 // CHECK11-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 4
13268 // CHECK11-NEXT: store i32** [[A]], i32*** [[A_ADDR]], align 4
13269 // CHECK11-NEXT: store i32** [[B]], i32*** [[B_ADDR]], align 4
13270 // CHECK11-NEXT: store i32** [[C]], i32*** [[C_ADDR]], align 4
13271 // CHECK11-NEXT: [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
13272 // CHECK11-NEXT: [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
13273 // CHECK11-NEXT: [[TMP2:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
13274 // CHECK11-NEXT: [[TMP3:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
13275 // CHECK11-NEXT: [[TMP4:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
13276 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP1]], align 4
13277 // CHECK11-NEXT: store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
13278 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
13279 // CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP6]], 0
13280 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
13281 // CHECK11-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
13282 // CHECK11-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
13283 // CHECK11-NEXT: store i32 0, i32* [[I]], align 4
13284 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
13285 // CHECK11-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP7]]
13286 // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
13287 // CHECK11: omp.precond.then:
13288 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
13289 // CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13290 // CHECK11-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_COMB_UB]], align 4
13291 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
13292 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
13293 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP0]], align 4
13294 // CHECK11-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
13295 // CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
13296 // CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP9]])
13297 // CHECK11-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
13298 // CHECK11-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13299 // CHECK11-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
13300 // CHECK11-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
13301 // CHECK11: cond.true:
13302 // CHECK11-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13303 // CHECK11-NEXT: br label [[COND_END:%.*]]
13304 // CHECK11: cond.false:
13305 // CHECK11-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
13306 // CHECK11-NEXT: br label [[COND_END]]
13307 // CHECK11: cond.end:
13308 // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
13309 // CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
13310 // CHECK11-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
13311 // CHECK11-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
13312 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
13313 // CHECK11: omp.inner.for.cond:
13314 // CHECK11-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !75
13315 // CHECK11-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !75
13316 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP18]], 1
13317 // CHECK11-NEXT: [[CMP5:%.*]] = icmp slt i32 [[TMP17]], [[ADD]]
13318 // CHECK11-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13319 // CHECK11: omp.inner.for.body:
13320 // CHECK11-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !75
13321 // CHECK11-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !75
13322 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32**, i32**, i32**)* @.omp_outlined..35 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], i32** [[TMP2]], i32** [[TMP3]], i32** [[TMP4]]), !llvm.access.group !75
13323 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
13324 // CHECK11: omp.inner.for.inc:
13325 // CHECK11-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !75
13326 // CHECK11-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !75
13327 // CHECK11-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
13328 // CHECK11-NEXT: store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !75
13329 // CHECK11-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !75
13330 // CHECK11-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !75
13331 // CHECK11-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
13332 // CHECK11-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !75
13333 // CHECK11-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !75
13334 // CHECK11-NEXT: [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !75
13335 // CHECK11-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
13336 // CHECK11-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !75
13337 // CHECK11-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !75
13338 // CHECK11-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !75
13339 // CHECK11-NEXT: [[CMP9:%.*]] = icmp sgt i32 [[TMP27]], [[TMP28]]
13340 // CHECK11-NEXT: br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
13341 // CHECK11: cond.true10:
13342 // CHECK11-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !75
13343 // CHECK11-NEXT: br label [[COND_END12:%.*]]
13344 // CHECK11: cond.false11:
13345 // CHECK11-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !75
13346 // CHECK11-NEXT: br label [[COND_END12]]
13347 // CHECK11: cond.end12:
13348 // CHECK11-NEXT: [[COND13:%.*]] = phi i32 [ [[TMP29]], [[COND_TRUE10]] ], [ [[TMP30]], [[COND_FALSE11]] ]
13349 // CHECK11-NEXT: store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !75
13350 // CHECK11-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !75
13351 // CHECK11-NEXT: store i32 [[TMP31]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !75
13352 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP76:![0-9]+]]
13353 // CHECK11: omp.inner.for.end:
13354 // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
13355 // CHECK11: omp.loop.exit:
13356 // CHECK11-NEXT: [[TMP32:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
13357 // CHECK11-NEXT: [[TMP33:%.*]] = load i32, i32* [[TMP32]], align 4
13358 // CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP33]])
13359 // CHECK11-NEXT: [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
13360 // CHECK11-NEXT: [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
13361 // CHECK11-NEXT: br i1 [[TMP35]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
13362 // CHECK11: .omp.final.then:
13363 // CHECK11-NEXT: [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
13364 // CHECK11-NEXT: [[SUB14:%.*]] = sub nsw i32 [[TMP36]], 0
13365 // CHECK11-NEXT: [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
13366 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV15]], 1
13367 // CHECK11-NEXT: [[ADD16:%.*]] = add nsw i32 0, [[MUL]]
13368 // CHECK11-NEXT: store i32 [[ADD16]], i32* [[I3]], align 4
13369 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]]
13370 // CHECK11: .omp.final.done:
13371 // CHECK11-NEXT: br label [[OMP_PRECOND_END]]
13372 // CHECK11: omp.precond.end:
13373 // CHECK11-NEXT: ret void
13374 //
13375 //
13376 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..35
13377 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
13378 // CHECK11-NEXT: entry:
13379 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
13380 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
13381 // CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
13382 // CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
13383 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 4
13384 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32**, align 4
13385 // CHECK11-NEXT: [[B_ADDR:%.*]] = alloca i32**, align 4
13386 // CHECK11-NEXT: [[C_ADDR:%.*]] = alloca i32**, align 4
13387 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
13388 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
13389 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
13390 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
13391 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
13392 // CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
13393 // CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
13394 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13395 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13396 // CHECK11-NEXT: [[I3:%.*]] = alloca i32, align 4
13397 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
13398 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
13399 // CHECK11-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
13400 // CHECK11-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
13401 // CHECK11-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 4
13402 // CHECK11-NEXT: store i32** [[A]], i32*** [[A_ADDR]], align 4
13403 // CHECK11-NEXT: store i32** [[B]], i32*** [[B_ADDR]], align 4
13404 // CHECK11-NEXT: store i32** [[C]], i32*** [[C_ADDR]], align 4
13405 // CHECK11-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
13406 // CHECK11-NEXT: [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
13407 // CHECK11-NEXT: [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
13408 // CHECK11-NEXT: [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
13409 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
13410 // CHECK11-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
13411 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
13412 // CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
13413 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
13414 // CHECK11-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
13415 // CHECK11-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
13416 // CHECK11-NEXT: store i32 0, i32* [[I]], align 4
13417 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
13418 // CHECK11-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
13419 // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
13420 // CHECK11: omp.precond.then:
13421 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
13422 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13423 // CHECK11-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
13424 // CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
13425 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
13426 // CHECK11-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
13427 // CHECK11-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
13428 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
13429 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
13430 // CHECK11-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
13431 // CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
13432 // CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
13433 // CHECK11-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
13434 // CHECK11-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13435 // CHECK11-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
13436 // CHECK11-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
13437 // CHECK11: cond.true:
13438 // CHECK11-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13439 // CHECK11-NEXT: br label [[COND_END:%.*]]
13440 // CHECK11: cond.false:
13441 // CHECK11-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
13442 // CHECK11-NEXT: br label [[COND_END]]
13443 // CHECK11: cond.end:
13444 // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
13445 // CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
13446 // CHECK11-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
13447 // CHECK11-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
13448 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
13449 // CHECK11: omp.inner.for.cond:
13450 // CHECK11-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !78
13451 // CHECK11-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !78
13452 // CHECK11-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
13453 // CHECK11-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13454 // CHECK11: omp.inner.for.body:
13455 // CHECK11-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !78
13456 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
13457 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
13458 // CHECK11-NEXT: store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !78
13459 // CHECK11-NEXT: [[TMP20:%.*]] = load i32*, i32** [[TMP2]], align 4, !llvm.access.group !78
13460 // CHECK11-NEXT: [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !78
13461 // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i32 [[TMP21]]
13462 // CHECK11-NEXT: [[TMP22:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !78
13463 // CHECK11-NEXT: [[TMP23:%.*]] = load i32*, i32** [[TMP3]], align 4, !llvm.access.group !78
13464 // CHECK11-NEXT: [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !78
13465 // CHECK11-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i32 [[TMP24]]
13466 // CHECK11-NEXT: [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4, !llvm.access.group !78
13467 // CHECK11-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP22]], [[TMP25]]
13468 // CHECK11-NEXT: [[TMP26:%.*]] = load i32*, i32** [[TMP1]], align 4, !llvm.access.group !78
13469 // CHECK11-NEXT: [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !78
13470 // CHECK11-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i32 [[TMP27]]
13471 // CHECK11-NEXT: store i32 [[ADD7]], i32* [[ARRAYIDX8]], align 4, !llvm.access.group !78
13472 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
13473 // CHECK11: omp.body.continue:
13474 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
13475 // CHECK11: omp.inner.for.inc:
13476 // CHECK11-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !78
13477 // CHECK11-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP28]], 1
13478 // CHECK11-NEXT: store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !78
13479 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP79:![0-9]+]]
13480 // CHECK11: omp.inner.for.end:
13481 // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
13482 // CHECK11: omp.loop.exit:
13483 // CHECK11-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
13484 // CHECK11-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
13485 // CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
13486 // CHECK11-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
13487 // CHECK11-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
13488 // CHECK11-NEXT: br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
13489 // CHECK11: .omp.final.then:
13490 // CHECK11-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
13491 // CHECK11-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
13492 // CHECK11-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
13493 // CHECK11-NEXT: [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
13494 // CHECK11-NEXT: [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
13495 // CHECK11-NEXT: store i32 [[ADD13]], i32* [[I3]], align 4
13496 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]]
13497 // CHECK11: .omp.final.done:
13498 // CHECK11-NEXT: br label [[OMP_PRECOND_END]]
13499 // CHECK11: omp.precond.end:
13500 // CHECK11-NEXT: ret void
13501 //
13502 //
13503 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l66
13504 // CHECK11-SAME: (i32 noundef [[N:%.*]], i32* noundef [[A:%.*]], i32* noundef [[B:%.*]], i32* noundef [[C:%.*]]) #[[ATTR1]] {
13505 // CHECK11-NEXT: entry:
13506 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
13507 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
13508 // CHECK11-NEXT: [[B_ADDR:%.*]] = alloca i32*, align 4
13509 // CHECK11-NEXT: [[C_ADDR:%.*]] = alloca i32*, align 4
13510 // CHECK11-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
13511 // CHECK11-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
13512 // CHECK11-NEXT: store i32* [[B]], i32** [[B_ADDR]], align 4
13513 // CHECK11-NEXT: store i32* [[C]], i32** [[C_ADDR]], align 4
13514 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..38 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
13515 // CHECK11-NEXT: ret void
13516 //
13517 //
13518 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..38
13519 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
13520 // CHECK11-NEXT: entry:
13521 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
13522 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
13523 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 4
13524 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32**, align 4
13525 // CHECK11-NEXT: [[B_ADDR:%.*]] = alloca i32**, align 4
13526 // CHECK11-NEXT: [[C_ADDR:%.*]] = alloca i32**, align 4
13527 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
13528 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
13529 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
13530 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
13531 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
13532 // CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
13533 // CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
13534 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13535 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13536 // CHECK11-NEXT: [[I3:%.*]] = alloca i32, align 4
13537 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
13538 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
13539 // CHECK11-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 4
13540 // CHECK11-NEXT: store i32** [[A]], i32*** [[A_ADDR]], align 4
13541 // CHECK11-NEXT: store i32** [[B]], i32*** [[B_ADDR]], align 4
13542 // CHECK11-NEXT: store i32** [[C]], i32*** [[C_ADDR]], align 4
13543 // CHECK11-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
13544 // CHECK11-NEXT: [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
13545 // CHECK11-NEXT: [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
13546 // CHECK11-NEXT: [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
13547 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
13548 // CHECK11-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
13549 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
13550 // CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
13551 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
13552 // CHECK11-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
13553 // CHECK11-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
13554 // CHECK11-NEXT: store i32 0, i32* [[I]], align 4
13555 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
13556 // CHECK11-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
13557 // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
13558 // CHECK11: omp.precond.then:
13559 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
13560 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13561 // CHECK11-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
13562 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
13563 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
13564 // CHECK11-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
13565 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
13566 // CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
13567 // CHECK11-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
13568 // CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13569 // CHECK11-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
13570 // CHECK11-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
13571 // CHECK11: cond.true:
13572 // CHECK11-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13573 // CHECK11-NEXT: br label [[COND_END:%.*]]
13574 // CHECK11: cond.false:
13575 // CHECK11-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
13576 // CHECK11-NEXT: br label [[COND_END]]
13577 // CHECK11: cond.end:
13578 // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
13579 // CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
13580 // CHECK11-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
13581 // CHECK11-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
13582 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
13583 // CHECK11: omp.inner.for.cond:
13584 // CHECK11-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !81
13585 // CHECK11-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !81
13586 // CHECK11-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
13587 // CHECK11-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13588 // CHECK11: omp.inner.for.body:
13589 // CHECK11-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !81
13590 // CHECK11-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !81
13591 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32**, i32**, i32**)* @.omp_outlined..39 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], i32** [[TMP1]], i32** [[TMP2]], i32** [[TMP3]]), !llvm.access.group !81
13592 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
13593 // CHECK11: omp.inner.for.inc:
13594 // CHECK11-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !81
13595 // CHECK11-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !81
13596 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
13597 // CHECK11-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !81
13598 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP82:![0-9]+]]
13599 // CHECK11: omp.inner.for.end:
13600 // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
13601 // CHECK11: omp.loop.exit:
13602 // CHECK11-NEXT: [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
13603 // CHECK11-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
13604 // CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
13605 // CHECK11-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
13606 // CHECK11-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
13607 // CHECK11-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
13608 // CHECK11: .omp.final.then:
13609 // CHECK11-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
13610 // CHECK11-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
13611 // CHECK11-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
13612 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
13613 // CHECK11-NEXT: [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
13614 // CHECK11-NEXT: store i32 [[ADD8]], i32* [[I3]], align 4
13615 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]]
13616 // CHECK11: .omp.final.done:
13617 // CHECK11-NEXT: br label [[OMP_PRECOND_END]]
13618 // CHECK11: omp.precond.end:
13619 // CHECK11-NEXT: ret void
13620 //
13621 //
13622 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..39
13623 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
13624 // CHECK11-NEXT: entry:
13625 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
13626 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
13627 // CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
13628 // CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
13629 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 4
13630 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32**, align 4
13631 // CHECK11-NEXT: [[B_ADDR:%.*]] = alloca i32**, align 4
13632 // CHECK11-NEXT: [[C_ADDR:%.*]] = alloca i32**, align 4
13633 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
13634 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
13635 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
13636 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
13637 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
13638 // CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
13639 // CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
13640 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13641 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13642 // CHECK11-NEXT: [[I3:%.*]] = alloca i32, align 4
13643 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
13644 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
13645 // CHECK11-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
13646 // CHECK11-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
13647 // CHECK11-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 4
13648 // CHECK11-NEXT: store i32** [[A]], i32*** [[A_ADDR]], align 4
13649 // CHECK11-NEXT: store i32** [[B]], i32*** [[B_ADDR]], align 4
13650 // CHECK11-NEXT: store i32** [[C]], i32*** [[C_ADDR]], align 4
13651 // CHECK11-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
13652 // CHECK11-NEXT: [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
13653 // CHECK11-NEXT: [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
13654 // CHECK11-NEXT: [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
13655 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
13656 // CHECK11-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
13657 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
13658 // CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
13659 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
13660 // CHECK11-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
13661 // CHECK11-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
13662 // CHECK11-NEXT: store i32 0, i32* [[I]], align 4
13663 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
13664 // CHECK11-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
13665 // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
13666 // CHECK11: omp.precond.then:
13667 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
13668 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13669 // CHECK11-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
13670 // CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
13671 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
13672 // CHECK11-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
13673 // CHECK11-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
13674 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
13675 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
13676 // CHECK11-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
13677 // CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
13678 // CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
13679 // CHECK11-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
13680 // CHECK11-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13681 // CHECK11-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
13682 // CHECK11-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
13683 // CHECK11: cond.true:
13684 // CHECK11-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13685 // CHECK11-NEXT: br label [[COND_END:%.*]]
13686 // CHECK11: cond.false:
13687 // CHECK11-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
13688 // CHECK11-NEXT: br label [[COND_END]]
13689 // CHECK11: cond.end:
13690 // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
13691 // CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
13692 // CHECK11-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
13693 // CHECK11-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
13694 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
13695 // CHECK11: omp.inner.for.cond:
13696 // CHECK11-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !84
13697 // CHECK11-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !84
13698 // CHECK11-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
13699 // CHECK11-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13700 // CHECK11: omp.inner.for.body:
13701 // CHECK11-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !84
13702 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
13703 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
13704 // CHECK11-NEXT: store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !84
13705 // CHECK11-NEXT: [[TMP20:%.*]] = load i32*, i32** [[TMP2]], align 4, !llvm.access.group !84
13706 // CHECK11-NEXT: [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !84
13707 // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i32 [[TMP21]]
13708 // CHECK11-NEXT: [[TMP22:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !84
13709 // CHECK11-NEXT: [[TMP23:%.*]] = load i32*, i32** [[TMP3]], align 4, !llvm.access.group !84
13710 // CHECK11-NEXT: [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !84
13711 // CHECK11-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i32 [[TMP24]]
13712 // CHECK11-NEXT: [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4, !llvm.access.group !84
13713 // CHECK11-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP22]], [[TMP25]]
13714 // CHECK11-NEXT: [[TMP26:%.*]] = load i32*, i32** [[TMP1]], align 4, !llvm.access.group !84
13715 // CHECK11-NEXT: [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !84
13716 // CHECK11-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i32 [[TMP27]]
13717 // CHECK11-NEXT: store i32 [[ADD7]], i32* [[ARRAYIDX8]], align 4, !llvm.access.group !84
13718 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
13719 // CHECK11: omp.body.continue:
13720 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
13721 // CHECK11: omp.inner.for.inc:
13722 // CHECK11-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !84
13723 // CHECK11-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP28]], 1
13724 // CHECK11-NEXT: store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !84
13725 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP85:![0-9]+]]
13726 // CHECK11: omp.inner.for.end:
13727 // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
13728 // CHECK11: omp.loop.exit:
13729 // CHECK11-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
13730 // CHECK11-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
13731 // CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
13732 // CHECK11-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
13733 // CHECK11-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
13734 // CHECK11-NEXT: br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
13735 // CHECK11: .omp.final.then:
13736 // CHECK11-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
13737 // CHECK11-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
13738 // CHECK11-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
13739 // CHECK11-NEXT: [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
13740 // CHECK11-NEXT: [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
13741 // CHECK11-NEXT: store i32 [[ADD13]], i32* [[I3]], align 4
13742 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]]
13743 // CHECK11: .omp.final.done:
13744 // CHECK11-NEXT: br label [[OMP_PRECOND_END]]
13745 // CHECK11: omp.precond.end:
13746 // CHECK11-NEXT: ret void
13747 //
13748 //
13749 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l74
13750 // CHECK11-SAME: (i32 noundef [[CH:%.*]], i32 noundef [[N:%.*]], i32* noundef [[A:%.*]], i32* noundef [[B:%.*]], i32* noundef [[C:%.*]]) #[[ATTR1]] {
13751 // CHECK11-NEXT: entry:
13752 // CHECK11-NEXT: [[CH_ADDR:%.*]] = alloca i32, align 4
13753 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
13754 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
13755 // CHECK11-NEXT: [[B_ADDR:%.*]] = alloca i32*, align 4
13756 // CHECK11-NEXT: [[C_ADDR:%.*]] = alloca i32*, align 4
13757 // CHECK11-NEXT: store i32 [[CH]], i32* [[CH_ADDR]], align 4
13758 // CHECK11-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
13759 // CHECK11-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
13760 // CHECK11-NEXT: store i32* [[B]], i32** [[B_ADDR]], align 4
13761 // CHECK11-NEXT: store i32* [[C]], i32** [[C_ADDR]], align 4
13762 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..42 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
13763 // CHECK11-NEXT: ret void
13764 //
13765 //
13766 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..42
13767 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
13768 // CHECK11-NEXT: entry:
13769 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
13770 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
13771 // CHECK11-NEXT: [[CH_ADDR:%.*]] = alloca i32*, align 4
13772 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 4
13773 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32**, align 4
13774 // CHECK11-NEXT: [[B_ADDR:%.*]] = alloca i32**, align 4
13775 // CHECK11-NEXT: [[C_ADDR:%.*]] = alloca i32**, align 4
13776 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
13777 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
13778 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
13779 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
13780 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
13781 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
13782 // CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
13783 // CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
13784 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13785 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13786 // CHECK11-NEXT: [[I4:%.*]] = alloca i32, align 4
13787 // CHECK11-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
13788 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
13789 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
13790 // CHECK11-NEXT: store i32* [[CH]], i32** [[CH_ADDR]], align 4
13791 // CHECK11-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 4
13792 // CHECK11-NEXT: store i32** [[A]], i32*** [[A_ADDR]], align 4
13793 // CHECK11-NEXT: store i32** [[B]], i32*** [[B_ADDR]], align 4
13794 // CHECK11-NEXT: store i32** [[C]], i32*** [[C_ADDR]], align 4
13795 // CHECK11-NEXT: [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
13796 // CHECK11-NEXT: [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
13797 // CHECK11-NEXT: [[TMP2:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
13798 // CHECK11-NEXT: [[TMP3:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
13799 // CHECK11-NEXT: [[TMP4:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
13800 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
13801 // CHECK11-NEXT: store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
13802 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
13803 // CHECK11-NEXT: store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
13804 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13805 // CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
13806 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
13807 // CHECK11-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
13808 // CHECK11-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
13809 // CHECK11-NEXT: store i32 0, i32* [[I]], align 4
13810 // CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13811 // CHECK11-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
13812 // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
13813 // CHECK11: omp.precond.then:
13814 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
13815 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
13816 // CHECK11-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
13817 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
13818 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
13819 // CHECK11-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
13820 // CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
13821 // CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
13822 // CHECK11-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
13823 // CHECK11-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
13824 // CHECK11-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
13825 // CHECK11-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
13826 // CHECK11: cond.true:
13827 // CHECK11-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
13828 // CHECK11-NEXT: br label [[COND_END:%.*]]
13829 // CHECK11: cond.false:
13830 // CHECK11-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
13831 // CHECK11-NEXT: br label [[COND_END]]
13832 // CHECK11: cond.end:
13833 // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
13834 // CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
13835 // CHECK11-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
13836 // CHECK11-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
13837 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
13838 // CHECK11: omp.inner.for.cond:
13839 // CHECK11-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !87
13840 // CHECK11-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !87
13841 // CHECK11-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
13842 // CHECK11-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13843 // CHECK11: omp.inner.for.body:
13844 // CHECK11-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !87
13845 // CHECK11-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !87
13846 // CHECK11-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !87
13847 // CHECK11-NEXT: store i32 [[TMP21]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !87
13848 // CHECK11-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !87
13849 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32**, i32**, i32**, i32)* @.omp_outlined..43 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], i32** [[TMP2]], i32** [[TMP3]], i32** [[TMP4]], i32 [[TMP22]]), !llvm.access.group !87
13850 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
13851 // CHECK11: omp.inner.for.inc:
13852 // CHECK11-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !87
13853 // CHECK11-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !87
13854 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
13855 // CHECK11-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !87
13856 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP88:![0-9]+]]
13857 // CHECK11: omp.inner.for.end:
13858 // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
13859 // CHECK11: omp.loop.exit:
13860 // CHECK11-NEXT: [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
13861 // CHECK11-NEXT: [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
13862 // CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
13863 // CHECK11-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
13864 // CHECK11-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
13865 // CHECK11-NEXT: br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
13866 // CHECK11: .omp.final.then:
13867 // CHECK11-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13868 // CHECK11-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP29]], 0
13869 // CHECK11-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
13870 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
13871 // CHECK11-NEXT: [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
13872 // CHECK11-NEXT: store i32 [[ADD9]], i32* [[I4]], align 4
13873 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]]
13874 // CHECK11: .omp.final.done:
13875 // CHECK11-NEXT: br label [[OMP_PRECOND_END]]
13876 // CHECK11: omp.precond.end:
13877 // CHECK11-NEXT: ret void
13878 //
13879 //
13880 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..43
13881 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[C:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
13882 // CHECK11-NEXT: entry:
13883 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
13884 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
13885 // CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
13886 // CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
13887 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 4
13888 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32**, align 4
13889 // CHECK11-NEXT: [[B_ADDR:%.*]] = alloca i32**, align 4
13890 // CHECK11-NEXT: [[C_ADDR:%.*]] = alloca i32**, align 4
13891 // CHECK11-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
13892 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
13893 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
13894 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
13895 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
13896 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
13897 // CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
13898 // CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
13899 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13900 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13901 // CHECK11-NEXT: [[I4:%.*]] = alloca i32, align 4
13902 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
13903 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
13904 // CHECK11-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
13905 // CHECK11-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
13906 // CHECK11-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 4
13907 // CHECK11-NEXT: store i32** [[A]], i32*** [[A_ADDR]], align 4
13908 // CHECK11-NEXT: store i32** [[B]], i32*** [[B_ADDR]], align 4
13909 // CHECK11-NEXT: store i32** [[C]], i32*** [[C_ADDR]], align 4
13910 // CHECK11-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
13911 // CHECK11-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
13912 // CHECK11-NEXT: [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
13913 // CHECK11-NEXT: [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
13914 // CHECK11-NEXT: [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
13915 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
13916 // CHECK11-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
13917 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13918 // CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
13919 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
13920 // CHECK11-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
13921 // CHECK11-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
13922 // CHECK11-NEXT: store i32 0, i32* [[I]], align 4
13923 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13924 // CHECK11-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
13925 // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
13926 // CHECK11: omp.precond.then:
13927 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
13928 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
13929 // CHECK11-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
13930 // CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
13931 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
13932 // CHECK11-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
13933 // CHECK11-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
13934 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
13935 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
13936 // CHECK11-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
13937 // CHECK11-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
13938 // CHECK11-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
13939 // CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP12]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP10]])
13940 // CHECK11-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
13941 // CHECK11: omp.dispatch.cond:
13942 // CHECK11-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
13943 // CHECK11-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
13944 // CHECK11-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP13]], [[TMP14]]
13945 // CHECK11-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
13946 // CHECK11: cond.true:
13947 // CHECK11-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
13948 // CHECK11-NEXT: br label [[COND_END:%.*]]
13949 // CHECK11: cond.false:
13950 // CHECK11-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
13951 // CHECK11-NEXT: br label [[COND_END]]
13952 // CHECK11: cond.end:
13953 // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ [[TMP15]], [[COND_TRUE]] ], [ [[TMP16]], [[COND_FALSE]] ]
13954 // CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
13955 // CHECK11-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
13956 // CHECK11-NEXT: store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
13957 // CHECK11-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
13958 // CHECK11-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
13959 // CHECK11-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
13960 // CHECK11-NEXT: br i1 [[CMP6]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
13961 // CHECK11: omp.dispatch.body:
13962 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
13963 // CHECK11: omp.inner.for.cond:
13964 // CHECK11-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !90
13965 // CHECK11-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !90
13966 // CHECK11-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
13967 // CHECK11-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13968 // CHECK11: omp.inner.for.body:
13969 // CHECK11-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !90
13970 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP22]], 1
13971 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
13972 // CHECK11-NEXT: store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !90
13973 // CHECK11-NEXT: [[TMP23:%.*]] = load i32*, i32** [[TMP2]], align 4, !llvm.access.group !90
13974 // CHECK11-NEXT: [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !90
13975 // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i32 [[TMP24]]
13976 // CHECK11-NEXT: [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !90
13977 // CHECK11-NEXT: [[TMP26:%.*]] = load i32*, i32** [[TMP3]], align 4, !llvm.access.group !90
13978 // CHECK11-NEXT: [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !90
13979 // CHECK11-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i32 [[TMP27]]
13980 // CHECK11-NEXT: [[TMP28:%.*]] = load i32, i32* [[ARRAYIDX8]], align 4, !llvm.access.group !90
13981 // CHECK11-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP25]], [[TMP28]]
13982 // CHECK11-NEXT: [[TMP29:%.*]] = load i32*, i32** [[TMP1]], align 4, !llvm.access.group !90
13983 // CHECK11-NEXT: [[TMP30:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !90
13984 // CHECK11-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds i32, i32* [[TMP29]], i32 [[TMP30]]
13985 // CHECK11-NEXT: store i32 [[ADD9]], i32* [[ARRAYIDX10]], align 4, !llvm.access.group !90
13986 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
13987 // CHECK11: omp.body.continue:
13988 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
13989 // CHECK11: omp.inner.for.inc:
13990 // CHECK11-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !90
13991 // CHECK11-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP31]], 1
13992 // CHECK11-NEXT: store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !90
13993 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP91:![0-9]+]]
13994 // CHECK11: omp.inner.for.end:
13995 // CHECK11-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
13996 // CHECK11: omp.dispatch.inc:
13997 // CHECK11-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
13998 // CHECK11-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
13999 // CHECK11-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP32]], [[TMP33]]
14000 // CHECK11-NEXT: store i32 [[ADD12]], i32* [[DOTOMP_LB]], align 4
14001 // CHECK11-NEXT: [[TMP34:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
14002 // CHECK11-NEXT: [[TMP35:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
14003 // CHECK11-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP34]], [[TMP35]]
14004 // CHECK11-NEXT: store i32 [[ADD13]], i32* [[DOTOMP_UB]], align 4
14005 // CHECK11-NEXT: br label [[OMP_DISPATCH_COND]]
14006 // CHECK11: omp.dispatch.end:
14007 // CHECK11-NEXT: [[TMP36:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
14008 // CHECK11-NEXT: [[TMP37:%.*]] = load i32, i32* [[TMP36]], align 4
14009 // CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP37]])
14010 // CHECK11-NEXT: [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
14011 // CHECK11-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
14012 // CHECK11-NEXT: br i1 [[TMP39]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
14013 // CHECK11: .omp.final.then:
14014 // CHECK11-NEXT: [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14015 // CHECK11-NEXT: [[SUB14:%.*]] = sub nsw i32 [[TMP40]], 0
14016 // CHECK11-NEXT: [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
14017 // CHECK11-NEXT: [[MUL16:%.*]] = mul nsw i32 [[DIV15]], 1
14018 // CHECK11-NEXT: [[ADD17:%.*]] = add nsw i32 0, [[MUL16]]
14019 // CHECK11-NEXT: store i32 [[ADD17]], i32* [[I4]], align 4
14020 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]]
14021 // CHECK11: .omp.final.done:
14022 // CHECK11-NEXT: br label [[OMP_PRECOND_END]]
14023 // CHECK11: omp.precond.end:
14024 // CHECK11-NEXT: ret void
14025 //
14026 //
14027 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l82
14028 // CHECK11-SAME: (i32 noundef [[N:%.*]], i32* noundef [[A:%.*]], i32* noundef [[B:%.*]], i32* noundef [[C:%.*]]) #[[ATTR1]] {
14029 // CHECK11-NEXT: entry:
14030 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
14031 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
14032 // CHECK11-NEXT: [[B_ADDR:%.*]] = alloca i32*, align 4
14033 // CHECK11-NEXT: [[C_ADDR:%.*]] = alloca i32*, align 4
14034 // CHECK11-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
14035 // CHECK11-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
14036 // CHECK11-NEXT: store i32* [[B]], i32** [[B_ADDR]], align 4
14037 // CHECK11-NEXT: store i32* [[C]], i32** [[C_ADDR]], align 4
14038 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..46 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
14039 // CHECK11-NEXT: ret void
14040 //
14041 //
14042 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..46
14043 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
14044 // CHECK11-NEXT: entry:
14045 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
14046 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
14047 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 4
14048 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32**, align 4
14049 // CHECK11-NEXT: [[B_ADDR:%.*]] = alloca i32**, align 4
14050 // CHECK11-NEXT: [[C_ADDR:%.*]] = alloca i32**, align 4
14051 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
14052 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
14053 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
14054 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
14055 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
14056 // CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
14057 // CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
14058 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
14059 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
14060 // CHECK11-NEXT: [[I3:%.*]] = alloca i32, align 4
14061 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
14062 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
14063 // CHECK11-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 4
14064 // CHECK11-NEXT: store i32** [[A]], i32*** [[A_ADDR]], align 4
14065 // CHECK11-NEXT: store i32** [[B]], i32*** [[B_ADDR]], align 4
14066 // CHECK11-NEXT: store i32** [[C]], i32*** [[C_ADDR]], align 4
14067 // CHECK11-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
14068 // CHECK11-NEXT: [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
14069 // CHECK11-NEXT: [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
14070 // CHECK11-NEXT: [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
14071 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
14072 // CHECK11-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
14073 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14074 // CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
14075 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
14076 // CHECK11-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
14077 // CHECK11-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
14078 // CHECK11-NEXT: store i32 0, i32* [[I]], align 4
14079 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14080 // CHECK11-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
14081 // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
14082 // CHECK11: omp.precond.then:
14083 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
14084 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14085 // CHECK11-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
14086 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
14087 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
14088 // CHECK11-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
14089 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
14090 // CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
14091 // CHECK11-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
14092 // CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14093 // CHECK11-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
14094 // CHECK11-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
14095 // CHECK11: cond.true:
14096 // CHECK11-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14097 // CHECK11-NEXT: br label [[COND_END:%.*]]
14098 // CHECK11: cond.false:
14099 // CHECK11-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
14100 // CHECK11-NEXT: br label [[COND_END]]
14101 // CHECK11: cond.end:
14102 // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
14103 // CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
14104 // CHECK11-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
14105 // CHECK11-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
14106 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
14107 // CHECK11: omp.inner.for.cond:
14108 // CHECK11-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !93
14109 // CHECK11-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !93
14110 // CHECK11-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
14111 // CHECK11-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14112 // CHECK11: omp.inner.for.body:
14113 // CHECK11-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !93
14114 // CHECK11-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !93
14115 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32**, i32**, i32**)* @.omp_outlined..47 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], i32** [[TMP1]], i32** [[TMP2]], i32** [[TMP3]]), !llvm.access.group !93
14116 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
14117 // CHECK11: omp.inner.for.inc:
14118 // CHECK11-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !93
14119 // CHECK11-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !93
14120 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
14121 // CHECK11-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !93
14122 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP94:![0-9]+]]
14123 // CHECK11: omp.inner.for.end:
14124 // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
14125 // CHECK11: omp.loop.exit:
14126 // CHECK11-NEXT: [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
14127 // CHECK11-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
14128 // CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
14129 // CHECK11-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
14130 // CHECK11-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
14131 // CHECK11-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
14132 // CHECK11: .omp.final.then:
14133 // CHECK11-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14134 // CHECK11-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
14135 // CHECK11-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
14136 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
14137 // CHECK11-NEXT: [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
14138 // CHECK11-NEXT: store i32 [[ADD8]], i32* [[I3]], align 4
14139 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]]
14140 // CHECK11: .omp.final.done:
14141 // CHECK11-NEXT: br label [[OMP_PRECOND_END]]
14142 // CHECK11: omp.precond.end:
14143 // CHECK11-NEXT: ret void
14144 //
14145 //
14146 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..47
14147 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
14148 // CHECK11-NEXT: entry:
14149 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
14150 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
14151 // CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
14152 // CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
14153 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 4
14154 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32**, align 4
14155 // CHECK11-NEXT: [[B_ADDR:%.*]] = alloca i32**, align 4
14156 // CHECK11-NEXT: [[C_ADDR:%.*]] = alloca i32**, align 4
14157 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
14158 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
14159 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
14160 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
14161 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
14162 // CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
14163 // CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
14164 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
14165 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
14166 // CHECK11-NEXT: [[I3:%.*]] = alloca i32, align 4
14167 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
14168 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
14169 // CHECK11-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
14170 // CHECK11-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
14171 // CHECK11-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 4
14172 // CHECK11-NEXT: store i32** [[A]], i32*** [[A_ADDR]], align 4
14173 // CHECK11-NEXT: store i32** [[B]], i32*** [[B_ADDR]], align 4
14174 // CHECK11-NEXT: store i32** [[C]], i32*** [[C_ADDR]], align 4
14175 // CHECK11-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
14176 // CHECK11-NEXT: [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
14177 // CHECK11-NEXT: [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
14178 // CHECK11-NEXT: [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
14179 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
14180 // CHECK11-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
14181 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14182 // CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
14183 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
14184 // CHECK11-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
14185 // CHECK11-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
14186 // CHECK11-NEXT: store i32 0, i32* [[I]], align 4
14187 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14188 // CHECK11-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
14189 // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
14190 // CHECK11: omp.precond.then:
14191 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
14192 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14193 // CHECK11-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
14194 // CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
14195 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
14196 // CHECK11-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
14197 // CHECK11-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
14198 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
14199 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
14200 // CHECK11-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
14201 // CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
14202 // CHECK11-NEXT: [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
14203 // CHECK11-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
14204 // CHECK11-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32 35, i32 [[TMP10]], i32 [[TMP11]], i32 1, i32 1)
14205 // CHECK11-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
14206 // CHECK11: omp.dispatch.cond:
14207 // CHECK11-NEXT: [[TMP14:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
14208 // CHECK11-NEXT: [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4
14209 // CHECK11-NEXT: [[TMP16:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP15]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
14210 // CHECK11-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP16]], 0
14211 // CHECK11-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
14212 // CHECK11: omp.dispatch.body:
14213 // CHECK11-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
14214 // CHECK11-NEXT: store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
14215 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
14216 // CHECK11: omp.inner.for.cond:
14217 // CHECK11-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !96
14218 // CHECK11-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !96
14219 // CHECK11-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
14220 // CHECK11-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14221 // CHECK11: omp.inner.for.body:
14222 // CHECK11-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !96
14223 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP20]], 1
14224 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
14225 // CHECK11-NEXT: store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !96
14226 // CHECK11-NEXT: [[TMP21:%.*]] = load i32*, i32** [[TMP2]], align 4, !llvm.access.group !96
14227 // CHECK11-NEXT: [[TMP22:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !96
14228 // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP21]], i32 [[TMP22]]
14229 // CHECK11-NEXT: [[TMP23:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !96
14230 // CHECK11-NEXT: [[TMP24:%.*]] = load i32*, i32** [[TMP3]], align 4, !llvm.access.group !96
14231 // CHECK11-NEXT: [[TMP25:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !96
14232 // CHECK11-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, i32* [[TMP24]], i32 [[TMP25]]
14233 // CHECK11-NEXT: [[TMP26:%.*]] = load i32, i32* [[ARRAYIDX5]], align 4, !llvm.access.group !96
14234 // CHECK11-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP23]], [[TMP26]]
14235 // CHECK11-NEXT: [[TMP27:%.*]] = load i32*, i32** [[TMP1]], align 4, !llvm.access.group !96
14236 // CHECK11-NEXT: [[TMP28:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !96
14237 // CHECK11-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i32, i32* [[TMP27]], i32 [[TMP28]]
14238 // CHECK11-NEXT: store i32 [[ADD6]], i32* [[ARRAYIDX7]], align 4, !llvm.access.group !96
14239 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
14240 // CHECK11: omp.body.continue:
14241 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
14242 // CHECK11: omp.inner.for.inc:
14243 // CHECK11-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !96
14244 // CHECK11-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP29]], 1
14245 // CHECK11-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !96
14246 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP97:![0-9]+]]
14247 // CHECK11: omp.inner.for.end:
14248 // CHECK11-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
14249 // CHECK11: omp.dispatch.inc:
14250 // CHECK11-NEXT: br label [[OMP_DISPATCH_COND]]
14251 // CHECK11: omp.dispatch.end:
14252 // CHECK11-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
14253 // CHECK11-NEXT: [[TMP31:%.*]] = icmp ne i32 [[TMP30]], 0
14254 // CHECK11-NEXT: br i1 [[TMP31]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
14255 // CHECK11: .omp.final.then:
14256 // CHECK11-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14257 // CHECK11-NEXT: [[SUB9:%.*]] = sub nsw i32 [[TMP32]], 0
14258 // CHECK11-NEXT: [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
14259 // CHECK11-NEXT: [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
14260 // CHECK11-NEXT: [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
14261 // CHECK11-NEXT: store i32 [[ADD12]], i32* [[I3]], align 4
14262 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]]
14263 // CHECK11: .omp.final.done:
14264 // CHECK11-NEXT: br label [[OMP_PRECOND_END]]
14265 // CHECK11: omp.precond.end:
14266 // CHECK11-NEXT: ret void
14267 //
14268 //
14269 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l90
14270 // CHECK11-SAME: (i32 noundef [[CH:%.*]], i32 noundef [[N:%.*]], i32* noundef [[A:%.*]], i32* noundef [[B:%.*]], i32* noundef [[C:%.*]]) #[[ATTR1]] {
14271 // CHECK11-NEXT: entry:
14272 // CHECK11-NEXT: [[CH_ADDR:%.*]] = alloca i32, align 4
14273 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
14274 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
14275 // CHECK11-NEXT: [[B_ADDR:%.*]] = alloca i32*, align 4
14276 // CHECK11-NEXT: [[C_ADDR:%.*]] = alloca i32*, align 4
14277 // CHECK11-NEXT: store i32 [[CH]], i32* [[CH_ADDR]], align 4
14278 // CHECK11-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
14279 // CHECK11-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
14280 // CHECK11-NEXT: store i32* [[B]], i32** [[B_ADDR]], align 4
14281 // CHECK11-NEXT: store i32* [[C]], i32** [[C_ADDR]], align 4
14282 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..50 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
14283 // CHECK11-NEXT: ret void
14284 //
14285 //
14286 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..50
14287 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
14288 // CHECK11-NEXT: entry:
14289 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
14290 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
14291 // CHECK11-NEXT: [[CH_ADDR:%.*]] = alloca i32*, align 4
14292 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 4
14293 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32**, align 4
14294 // CHECK11-NEXT: [[B_ADDR:%.*]] = alloca i32**, align 4
14295 // CHECK11-NEXT: [[C_ADDR:%.*]] = alloca i32**, align 4
14296 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
14297 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
14298 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
14299 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
14300 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
14301 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
14302 // CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
14303 // CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
14304 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
14305 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
14306 // CHECK11-NEXT: [[I4:%.*]] = alloca i32, align 4
14307 // CHECK11-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
14308 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
14309 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
14310 // CHECK11-NEXT: store i32* [[CH]], i32** [[CH_ADDR]], align 4
14311 // CHECK11-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 4
14312 // CHECK11-NEXT: store i32** [[A]], i32*** [[A_ADDR]], align 4
14313 // CHECK11-NEXT: store i32** [[B]], i32*** [[B_ADDR]], align 4
14314 // CHECK11-NEXT: store i32** [[C]], i32*** [[C_ADDR]], align 4
14315 // CHECK11-NEXT: [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
14316 // CHECK11-NEXT: [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
14317 // CHECK11-NEXT: [[TMP2:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
14318 // CHECK11-NEXT: [[TMP3:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
14319 // CHECK11-NEXT: [[TMP4:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
14320 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
14321 // CHECK11-NEXT: store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
14322 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
14323 // CHECK11-NEXT: store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
14324 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14325 // CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
14326 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
14327 // CHECK11-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
14328 // CHECK11-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
14329 // CHECK11-NEXT: store i32 0, i32* [[I]], align 4
14330 // CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14331 // CHECK11-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
14332 // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
14333 // CHECK11: omp.precond.then:
14334 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
14335 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
14336 // CHECK11-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
14337 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
14338 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
14339 // CHECK11-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
14340 // CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
14341 // CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
14342 // CHECK11-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
14343 // CHECK11-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
14344 // CHECK11-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
14345 // CHECK11-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
14346 // CHECK11: cond.true:
14347 // CHECK11-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
14348 // CHECK11-NEXT: br label [[COND_END:%.*]]
14349 // CHECK11: cond.false:
14350 // CHECK11-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
14351 // CHECK11-NEXT: br label [[COND_END]]
14352 // CHECK11: cond.end:
14353 // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
14354 // CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
14355 // CHECK11-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
14356 // CHECK11-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
14357 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
14358 // CHECK11: omp.inner.for.cond:
14359 // CHECK11-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !99
14360 // CHECK11-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !99
14361 // CHECK11-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
14362 // CHECK11-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14363 // CHECK11: omp.inner.for.body:
14364 // CHECK11-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !99
14365 // CHECK11-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !99
14366 // CHECK11-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !99
14367 // CHECK11-NEXT: store i32 [[TMP21]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !99
14368 // CHECK11-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !99
14369 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32**, i32**, i32**, i32)* @.omp_outlined..51 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], i32** [[TMP2]], i32** [[TMP3]], i32** [[TMP4]], i32 [[TMP22]]), !llvm.access.group !99
14370 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
14371 // CHECK11: omp.inner.for.inc:
14372 // CHECK11-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !99
14373 // CHECK11-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !99
14374 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
14375 // CHECK11-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !99
14376 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP100:![0-9]+]]
14377 // CHECK11: omp.inner.for.end:
14378 // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
14379 // CHECK11: omp.loop.exit:
14380 // CHECK11-NEXT: [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
14381 // CHECK11-NEXT: [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
14382 // CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
14383 // CHECK11-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
14384 // CHECK11-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
14385 // CHECK11-NEXT: br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
14386 // CHECK11: .omp.final.then:
14387 // CHECK11-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14388 // CHECK11-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP29]], 0
14389 // CHECK11-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
14390 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
14391 // CHECK11-NEXT: [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
14392 // CHECK11-NEXT: store i32 [[ADD9]], i32* [[I4]], align 4
14393 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]]
14394 // CHECK11: .omp.final.done:
14395 // CHECK11-NEXT: br label [[OMP_PRECOND_END]]
14396 // CHECK11: omp.precond.end:
14397 // CHECK11-NEXT: ret void
14398 //
14399 //
14400 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..51
14401 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[C:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
14402 // CHECK11-NEXT: entry:
14403 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
14404 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
14405 // CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
14406 // CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
14407 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 4
14408 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32**, align 4
14409 // CHECK11-NEXT: [[B_ADDR:%.*]] = alloca i32**, align 4
14410 // CHECK11-NEXT: [[C_ADDR:%.*]] = alloca i32**, align 4
14411 // CHECK11-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
14412 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
14413 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
14414 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
14415 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
14416 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
14417 // CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
14418 // CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
14419 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
14420 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
14421 // CHECK11-NEXT: [[I4:%.*]] = alloca i32, align 4
14422 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
14423 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
14424 // CHECK11-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
14425 // CHECK11-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
14426 // CHECK11-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 4
14427 // CHECK11-NEXT: store i32** [[A]], i32*** [[A_ADDR]], align 4
14428 // CHECK11-NEXT: store i32** [[B]], i32*** [[B_ADDR]], align 4
14429 // CHECK11-NEXT: store i32** [[C]], i32*** [[C_ADDR]], align 4
14430 // CHECK11-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
14431 // CHECK11-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
14432 // CHECK11-NEXT: [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
14433 // CHECK11-NEXT: [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
14434 // CHECK11-NEXT: [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
14435 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
14436 // CHECK11-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
14437 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14438 // CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
14439 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
14440 // CHECK11-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
14441 // CHECK11-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
14442 // CHECK11-NEXT: store i32 0, i32* [[I]], align 4
14443 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14444 // CHECK11-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
14445 // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
14446 // CHECK11: omp.precond.then:
14447 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
14448 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
14449 // CHECK11-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
14450 // CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
14451 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
14452 // CHECK11-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
14453 // CHECK11-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
14454 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
14455 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
14456 // CHECK11-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
14457 // CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
14458 // CHECK11-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
14459 // CHECK11-NEXT: [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
14460 // CHECK11-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
14461 // CHECK11-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32 35, i32 [[TMP11]], i32 [[TMP12]], i32 1, i32 [[TMP10]])
14462 // CHECK11-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
14463 // CHECK11: omp.dispatch.cond:
14464 // CHECK11-NEXT: [[TMP15:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
14465 // CHECK11-NEXT: [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4
14466 // CHECK11-NEXT: [[TMP17:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP16]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
14467 // CHECK11-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP17]], 0
14468 // CHECK11-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
14469 // CHECK11: omp.dispatch.body:
14470 // CHECK11-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
14471 // CHECK11-NEXT: store i32 [[TMP18]], i32* [[DOTOMP_IV]], align 4
14472 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
14473 // CHECK11: omp.inner.for.cond:
14474 // CHECK11-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !102
14475 // CHECK11-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !102
14476 // CHECK11-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
14477 // CHECK11-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14478 // CHECK11: omp.inner.for.body:
14479 // CHECK11-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !102
14480 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP21]], 1
14481 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
14482 // CHECK11-NEXT: store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !102
14483 // CHECK11-NEXT: [[TMP22:%.*]] = load i32*, i32** [[TMP2]], align 4, !llvm.access.group !102
14484 // CHECK11-NEXT: [[TMP23:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !102
14485 // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP22]], i32 [[TMP23]]
14486 // CHECK11-NEXT: [[TMP24:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !102
14487 // CHECK11-NEXT: [[TMP25:%.*]] = load i32*, i32** [[TMP3]], align 4, !llvm.access.group !102
14488 // CHECK11-NEXT: [[TMP26:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !102
14489 // CHECK11-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[TMP25]], i32 [[TMP26]]
14490 // CHECK11-NEXT: [[TMP27:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4, !llvm.access.group !102
14491 // CHECK11-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP24]], [[TMP27]]
14492 // CHECK11-NEXT: [[TMP28:%.*]] = load i32*, i32** [[TMP1]], align 4, !llvm.access.group !102
14493 // CHECK11-NEXT: [[TMP29:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !102
14494 // CHECK11-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP28]], i32 [[TMP29]]
14495 // CHECK11-NEXT: store i32 [[ADD7]], i32* [[ARRAYIDX8]], align 4, !llvm.access.group !102
14496 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
14497 // CHECK11: omp.body.continue:
14498 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
14499 // CHECK11: omp.inner.for.inc:
14500 // CHECK11-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !102
14501 // CHECK11-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP30]], 1
14502 // CHECK11-NEXT: store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !102
14503 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP103:![0-9]+]]
14504 // CHECK11: omp.inner.for.end:
14505 // CHECK11-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
14506 // CHECK11: omp.dispatch.inc:
14507 // CHECK11-NEXT: br label [[OMP_DISPATCH_COND]]
14508 // CHECK11: omp.dispatch.end:
14509 // CHECK11-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
14510 // CHECK11-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
14511 // CHECK11-NEXT: br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
14512 // CHECK11: .omp.final.then:
14513 // CHECK11-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14514 // CHECK11-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
14515 // CHECK11-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
14516 // CHECK11-NEXT: [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
14517 // CHECK11-NEXT: [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
14518 // CHECK11-NEXT: store i32 [[ADD13]], i32* [[I4]], align 4
14519 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]]
14520 // CHECK11: .omp.final.done:
14521 // CHECK11-NEXT: br label [[OMP_PRECOND_END]]
14522 // CHECK11: omp.precond.end:
14523 // CHECK11-NEXT: ret void
14524 //
14525 //
14526 // CHECK11-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
14527 // CHECK11-SAME: () #[[ATTR4:[0-9]+]] {
14528 // CHECK11-NEXT: entry:
14529 // CHECK11-NEXT: call void @__tgt_register_requires(i64 1)
14530 // CHECK11-NEXT: ret void
14531 //
14532 //
14533 // CHECK13-LABEL: define {{[^@]+}}@main
14534 // CHECK13-SAME: () #[[ATTR0:[0-9]+]] {
14535 // CHECK13-NEXT: entry:
14536 // CHECK13-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
14537 // CHECK13-NEXT: [[A:%.*]] = alloca double*, align 8
14538 // CHECK13-NEXT: [[B:%.*]] = alloca double*, align 8
14539 // CHECK13-NEXT: [[C:%.*]] = alloca double*, align 8
14540 // CHECK13-NEXT: [[N:%.*]] = alloca i32, align 4
14541 // CHECK13-NEXT: [[CH:%.*]] = alloca i32, align 4
14542 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
14543 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
14544 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
14545 // CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
14546 // CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
14547 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
14548 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
14549 // CHECK13-NEXT: [[I3:%.*]] = alloca i32, align 4
14550 // CHECK13-NEXT: [[_TMP15:%.*]] = alloca i32, align 4
14551 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_16:%.*]] = alloca i32, align 4
14552 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4
14553 // CHECK13-NEXT: [[DOTOMP_LB21:%.*]] = alloca i32, align 4
14554 // CHECK13-NEXT: [[DOTOMP_UB22:%.*]] = alloca i32, align 4
14555 // CHECK13-NEXT: [[I23:%.*]] = alloca i32, align 4
14556 // CHECK13-NEXT: [[DOTOMP_IV26:%.*]] = alloca i32, align 4
14557 // CHECK13-NEXT: [[I27:%.*]] = alloca i32, align 4
14558 // CHECK13-NEXT: [[_TMP49:%.*]] = alloca i32, align 4
14559 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_50:%.*]] = alloca i32, align 4
14560 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_51:%.*]] = alloca i32, align 4
14561 // CHECK13-NEXT: [[DOTOMP_LB55:%.*]] = alloca i32, align 4
14562 // CHECK13-NEXT: [[DOTOMP_UB56:%.*]] = alloca i32, align 4
14563 // CHECK13-NEXT: [[I57:%.*]] = alloca i32, align 4
14564 // CHECK13-NEXT: [[DOTOMP_IV60:%.*]] = alloca i32, align 4
14565 // CHECK13-NEXT: [[I61:%.*]] = alloca i32, align 4
14566 // CHECK13-NEXT: [[_TMP83:%.*]] = alloca i32, align 4
14567 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_84:%.*]] = alloca i32, align 4
14568 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_85:%.*]] = alloca i32, align 4
14569 // CHECK13-NEXT: [[DOTOMP_LB89:%.*]] = alloca i32, align 4
14570 // CHECK13-NEXT: [[DOTOMP_UB90:%.*]] = alloca i32, align 4
14571 // CHECK13-NEXT: [[I91:%.*]] = alloca i32, align 4
14572 // CHECK13-NEXT: [[DOTOMP_IV94:%.*]] = alloca i32, align 4
14573 // CHECK13-NEXT: [[I95:%.*]] = alloca i32, align 4
14574 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_117:%.*]] = alloca i32, align 4
14575 // CHECK13-NEXT: [[_TMP118:%.*]] = alloca i32, align 4
14576 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_119:%.*]] = alloca i32, align 4
14577 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_120:%.*]] = alloca i32, align 4
14578 // CHECK13-NEXT: [[DOTOMP_LB124:%.*]] = alloca i32, align 4
14579 // CHECK13-NEXT: [[DOTOMP_UB125:%.*]] = alloca i32, align 4
14580 // CHECK13-NEXT: [[I126:%.*]] = alloca i32, align 4
14581 // CHECK13-NEXT: [[DOTOMP_IV129:%.*]] = alloca i32, align 4
14582 // CHECK13-NEXT: [[I130:%.*]] = alloca i32, align 4
14583 // CHECK13-NEXT: [[_TMP152:%.*]] = alloca i32, align 4
14584 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_153:%.*]] = alloca i32, align 4
14585 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_154:%.*]] = alloca i32, align 4
14586 // CHECK13-NEXT: [[DOTOMP_LB158:%.*]] = alloca i32, align 4
14587 // CHECK13-NEXT: [[DOTOMP_UB159:%.*]] = alloca i32, align 4
14588 // CHECK13-NEXT: [[I160:%.*]] = alloca i32, align 4
14589 // CHECK13-NEXT: [[DOTOMP_IV163:%.*]] = alloca i32, align 4
14590 // CHECK13-NEXT: [[I164:%.*]] = alloca i32, align 4
14591 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_186:%.*]] = alloca i32, align 4
14592 // CHECK13-NEXT: [[_TMP187:%.*]] = alloca i32, align 4
14593 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_188:%.*]] = alloca i32, align 4
14594 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_189:%.*]] = alloca i32, align 4
14595 // CHECK13-NEXT: [[DOTOMP_LB193:%.*]] = alloca i32, align 4
14596 // CHECK13-NEXT: [[DOTOMP_UB194:%.*]] = alloca i32, align 4
14597 // CHECK13-NEXT: [[I195:%.*]] = alloca i32, align 4
14598 // CHECK13-NEXT: [[DOTOMP_IV198:%.*]] = alloca i32, align 4
14599 // CHECK13-NEXT: [[I199:%.*]] = alloca i32, align 4
14600 // CHECK13-NEXT: store i32 0, i32* [[RETVAL]], align 4
14601 // CHECK13-NEXT: store i32 10000, i32* [[N]], align 4
14602 // CHECK13-NEXT: store i32 100, i32* [[CH]], align 4
14603 // CHECK13-NEXT: [[TMP0:%.*]] = load i32, i32* [[N]], align 4
14604 // CHECK13-NEXT: store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
14605 // CHECK13-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14606 // CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], 0
14607 // CHECK13-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
14608 // CHECK13-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
14609 // CHECK13-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
14610 // CHECK13-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
14611 // CHECK13-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14612 // CHECK13-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
14613 // CHECK13-NEXT: store i32 0, i32* [[I]], align 4
14614 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14615 // CHECK13-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
14616 // CHECK13-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
14617 // CHECK13: simd.if.then:
14618 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
14619 // CHECK13-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
14620 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
14621 // CHECK13: omp.inner.for.cond:
14622 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
14623 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !2
14624 // CHECK13-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
14625 // CHECK13-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14626 // CHECK13: omp.inner.for.body:
14627 // CHECK13-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
14628 // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
14629 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
14630 // CHECK13-NEXT: store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !2
14631 // CHECK13-NEXT: [[TMP8:%.*]] = load double*, double** [[B]], align 8, !llvm.access.group !2
14632 // CHECK13-NEXT: [[TMP9:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !2
14633 // CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP9]] to i64
14634 // CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP8]], i64 [[IDXPROM]]
14635 // CHECK13-NEXT: [[TMP10:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !2
14636 // CHECK13-NEXT: [[TMP11:%.*]] = load double*, double** [[C]], align 8, !llvm.access.group !2
14637 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !2
14638 // CHECK13-NEXT: [[IDXPROM5:%.*]] = sext i32 [[TMP12]] to i64
14639 // CHECK13-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP11]], i64 [[IDXPROM5]]
14640 // CHECK13-NEXT: [[TMP13:%.*]] = load double, double* [[ARRAYIDX6]], align 8, !llvm.access.group !2
14641 // CHECK13-NEXT: [[ADD7:%.*]] = fadd double [[TMP10]], [[TMP13]]
14642 // CHECK13-NEXT: [[TMP14:%.*]] = load double*, double** [[A]], align 8, !llvm.access.group !2
14643 // CHECK13-NEXT: [[TMP15:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !2
14644 // CHECK13-NEXT: [[IDXPROM8:%.*]] = sext i32 [[TMP15]] to i64
14645 // CHECK13-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds double, double* [[TMP14]], i64 [[IDXPROM8]]
14646 // CHECK13-NEXT: store double [[ADD7]], double* [[ARRAYIDX9]], align 8, !llvm.access.group !2
14647 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
14648 // CHECK13: omp.body.continue:
14649 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
14650 // CHECK13: omp.inner.for.inc:
14651 // CHECK13-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
14652 // CHECK13-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP16]], 1
14653 // CHECK13-NEXT: store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
14654 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
14655 // CHECK13: omp.inner.for.end:
14656 // CHECK13-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14657 // CHECK13-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP17]], 0
14658 // CHECK13-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1
14659 // CHECK13-NEXT: [[MUL13:%.*]] = mul nsw i32 [[DIV12]], 1
14660 // CHECK13-NEXT: [[ADD14:%.*]] = add nsw i32 0, [[MUL13]]
14661 // CHECK13-NEXT: store i32 [[ADD14]], i32* [[I3]], align 4
14662 // CHECK13-NEXT: br label [[SIMD_IF_END]]
14663 // CHECK13: simd.if.end:
14664 // CHECK13-NEXT: [[TMP18:%.*]] = load i32, i32* [[N]], align 4
14665 // CHECK13-NEXT: store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR_16]], align 4
14666 // CHECK13-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4
14667 // CHECK13-NEXT: [[SUB18:%.*]] = sub nsw i32 [[TMP19]], 0
14668 // CHECK13-NEXT: [[DIV19:%.*]] = sdiv i32 [[SUB18]], 1
14669 // CHECK13-NEXT: [[SUB20:%.*]] = sub nsw i32 [[DIV19]], 1
14670 // CHECK13-NEXT: store i32 [[SUB20]], i32* [[DOTCAPTURE_EXPR_17]], align 4
14671 // CHECK13-NEXT: store i32 0, i32* [[DOTOMP_LB21]], align 4
14672 // CHECK13-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4
14673 // CHECK13-NEXT: store i32 [[TMP20]], i32* [[DOTOMP_UB22]], align 4
14674 // CHECK13-NEXT: store i32 0, i32* [[I23]], align 4
14675 // CHECK13-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4
14676 // CHECK13-NEXT: [[CMP24:%.*]] = icmp slt i32 0, [[TMP21]]
14677 // CHECK13-NEXT: br i1 [[CMP24]], label [[SIMD_IF_THEN25:%.*]], label [[SIMD_IF_END48:%.*]]
14678 // CHECK13: simd.if.then25:
14679 // CHECK13-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_LB21]], align 4
14680 // CHECK13-NEXT: store i32 [[TMP22]], i32* [[DOTOMP_IV26]], align 4
14681 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND28:%.*]]
14682 // CHECK13: omp.inner.for.cond28:
14683 // CHECK13-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV26]], align 4, !llvm.access.group !6
14684 // CHECK13-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_UB22]], align 4, !llvm.access.group !6
14685 // CHECK13-NEXT: [[CMP29:%.*]] = icmp sle i32 [[TMP23]], [[TMP24]]
14686 // CHECK13-NEXT: br i1 [[CMP29]], label [[OMP_INNER_FOR_BODY30:%.*]], label [[OMP_INNER_FOR_END43:%.*]]
14687 // CHECK13: omp.inner.for.body30:
14688 // CHECK13-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV26]], align 4, !llvm.access.group !6
14689 // CHECK13-NEXT: [[MUL31:%.*]] = mul nsw i32 [[TMP25]], 1
14690 // CHECK13-NEXT: [[ADD32:%.*]] = add nsw i32 0, [[MUL31]]
14691 // CHECK13-NEXT: store i32 [[ADD32]], i32* [[I27]], align 4, !llvm.access.group !6
14692 // CHECK13-NEXT: [[TMP26:%.*]] = load double*, double** [[B]], align 8, !llvm.access.group !6
14693 // CHECK13-NEXT: [[TMP27:%.*]] = load i32, i32* [[I27]], align 4, !llvm.access.group !6
14694 // CHECK13-NEXT: [[IDXPROM33:%.*]] = sext i32 [[TMP27]] to i64
14695 // CHECK13-NEXT: [[ARRAYIDX34:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM33]]
14696 // CHECK13-NEXT: [[TMP28:%.*]] = load double, double* [[ARRAYIDX34]], align 8, !llvm.access.group !6
14697 // CHECK13-NEXT: [[TMP29:%.*]] = load double*, double** [[C]], align 8, !llvm.access.group !6
14698 // CHECK13-NEXT: [[TMP30:%.*]] = load i32, i32* [[I27]], align 4, !llvm.access.group !6
14699 // CHECK13-NEXT: [[IDXPROM35:%.*]] = sext i32 [[TMP30]] to i64
14700 // CHECK13-NEXT: [[ARRAYIDX36:%.*]] = getelementptr inbounds double, double* [[TMP29]], i64 [[IDXPROM35]]
14701 // CHECK13-NEXT: [[TMP31:%.*]] = load double, double* [[ARRAYIDX36]], align 8, !llvm.access.group !6
14702 // CHECK13-NEXT: [[ADD37:%.*]] = fadd double [[TMP28]], [[TMP31]]
14703 // CHECK13-NEXT: [[TMP32:%.*]] = load double*, double** [[A]], align 8, !llvm.access.group !6
14704 // CHECK13-NEXT: [[TMP33:%.*]] = load i32, i32* [[I27]], align 4, !llvm.access.group !6
14705 // CHECK13-NEXT: [[IDXPROM38:%.*]] = sext i32 [[TMP33]] to i64
14706 // CHECK13-NEXT: [[ARRAYIDX39:%.*]] = getelementptr inbounds double, double* [[TMP32]], i64 [[IDXPROM38]]
14707 // CHECK13-NEXT: store double [[ADD37]], double* [[ARRAYIDX39]], align 8, !llvm.access.group !6
14708 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE40:%.*]]
14709 // CHECK13: omp.body.continue40:
14710 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC41:%.*]]
14711 // CHECK13: omp.inner.for.inc41:
14712 // CHECK13-NEXT: [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV26]], align 4, !llvm.access.group !6
14713 // CHECK13-NEXT: [[ADD42:%.*]] = add nsw i32 [[TMP34]], 1
14714 // CHECK13-NEXT: store i32 [[ADD42]], i32* [[DOTOMP_IV26]], align 4, !llvm.access.group !6
14715 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND28]], !llvm.loop [[LOOP7:![0-9]+]]
14716 // CHECK13: omp.inner.for.end43:
14717 // CHECK13-NEXT: [[TMP35:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4
14718 // CHECK13-NEXT: [[SUB44:%.*]] = sub nsw i32 [[TMP35]], 0
14719 // CHECK13-NEXT: [[DIV45:%.*]] = sdiv i32 [[SUB44]], 1
14720 // CHECK13-NEXT: [[MUL46:%.*]] = mul nsw i32 [[DIV45]], 1
14721 // CHECK13-NEXT: [[ADD47:%.*]] = add nsw i32 0, [[MUL46]]
14722 // CHECK13-NEXT: store i32 [[ADD47]], i32* [[I27]], align 4
14723 // CHECK13-NEXT: br label [[SIMD_IF_END48]]
14724 // CHECK13: simd.if.end48:
14725 // CHECK13-NEXT: [[TMP36:%.*]] = load i32, i32* [[N]], align 4
14726 // CHECK13-NEXT: store i32 [[TMP36]], i32* [[DOTCAPTURE_EXPR_50]], align 4
14727 // CHECK13-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4
14728 // CHECK13-NEXT: [[SUB52:%.*]] = sub nsw i32 [[TMP37]], 0
14729 // CHECK13-NEXT: [[DIV53:%.*]] = sdiv i32 [[SUB52]], 1
14730 // CHECK13-NEXT: [[SUB54:%.*]] = sub nsw i32 [[DIV53]], 1
14731 // CHECK13-NEXT: store i32 [[SUB54]], i32* [[DOTCAPTURE_EXPR_51]], align 4
14732 // CHECK13-NEXT: store i32 0, i32* [[DOTOMP_LB55]], align 4
14733 // CHECK13-NEXT: [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_51]], align 4
14734 // CHECK13-NEXT: store i32 [[TMP38]], i32* [[DOTOMP_UB56]], align 4
14735 // CHECK13-NEXT: store i32 0, i32* [[I57]], align 4
14736 // CHECK13-NEXT: [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4
14737 // CHECK13-NEXT: [[CMP58:%.*]] = icmp slt i32 0, [[TMP39]]
14738 // CHECK13-NEXT: br i1 [[CMP58]], label [[SIMD_IF_THEN59:%.*]], label [[SIMD_IF_END82:%.*]]
14739 // CHECK13: simd.if.then59:
14740 // CHECK13-NEXT: [[TMP40:%.*]] = load i32, i32* [[DOTOMP_LB55]], align 4
14741 // CHECK13-NEXT: store i32 [[TMP40]], i32* [[DOTOMP_IV60]], align 4
14742 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND62:%.*]]
14743 // CHECK13: omp.inner.for.cond62:
14744 // CHECK13-NEXT: [[TMP41:%.*]] = load i32, i32* [[DOTOMP_IV60]], align 4, !llvm.access.group !9
14745 // CHECK13-NEXT: [[TMP42:%.*]] = load i32, i32* [[DOTOMP_UB56]], align 4, !llvm.access.group !9
14746 // CHECK13-NEXT: [[CMP63:%.*]] = icmp sle i32 [[TMP41]], [[TMP42]]
14747 // CHECK13-NEXT: br i1 [[CMP63]], label [[OMP_INNER_FOR_BODY64:%.*]], label [[OMP_INNER_FOR_END77:%.*]]
14748 // CHECK13: omp.inner.for.body64:
14749 // CHECK13-NEXT: [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV60]], align 4, !llvm.access.group !9
14750 // CHECK13-NEXT: [[MUL65:%.*]] = mul nsw i32 [[TMP43]], 1
14751 // CHECK13-NEXT: [[ADD66:%.*]] = add nsw i32 0, [[MUL65]]
14752 // CHECK13-NEXT: store i32 [[ADD66]], i32* [[I61]], align 4, !llvm.access.group !9
14753 // CHECK13-NEXT: [[TMP44:%.*]] = load double*, double** [[B]], align 8, !llvm.access.group !9
14754 // CHECK13-NEXT: [[TMP45:%.*]] = load i32, i32* [[I61]], align 4, !llvm.access.group !9
14755 // CHECK13-NEXT: [[IDXPROM67:%.*]] = sext i32 [[TMP45]] to i64
14756 // CHECK13-NEXT: [[ARRAYIDX68:%.*]] = getelementptr inbounds double, double* [[TMP44]], i64 [[IDXPROM67]]
14757 // CHECK13-NEXT: [[TMP46:%.*]] = load double, double* [[ARRAYIDX68]], align 8, !llvm.access.group !9
14758 // CHECK13-NEXT: [[TMP47:%.*]] = load double*, double** [[C]], align 8, !llvm.access.group !9
14759 // CHECK13-NEXT: [[TMP48:%.*]] = load i32, i32* [[I61]], align 4, !llvm.access.group !9
14760 // CHECK13-NEXT: [[IDXPROM69:%.*]] = sext i32 [[TMP48]] to i64
14761 // CHECK13-NEXT: [[ARRAYIDX70:%.*]] = getelementptr inbounds double, double* [[TMP47]], i64 [[IDXPROM69]]
14762 // CHECK13-NEXT: [[TMP49:%.*]] = load double, double* [[ARRAYIDX70]], align 8, !llvm.access.group !9
14763 // CHECK13-NEXT: [[ADD71:%.*]] = fadd double [[TMP46]], [[TMP49]]
14764 // CHECK13-NEXT: [[TMP50:%.*]] = load double*, double** [[A]], align 8, !llvm.access.group !9
14765 // CHECK13-NEXT: [[TMP51:%.*]] = load i32, i32* [[I61]], align 4, !llvm.access.group !9
14766 // CHECK13-NEXT: [[IDXPROM72:%.*]] = sext i32 [[TMP51]] to i64
14767 // CHECK13-NEXT: [[ARRAYIDX73:%.*]] = getelementptr inbounds double, double* [[TMP50]], i64 [[IDXPROM72]]
14768 // CHECK13-NEXT: store double [[ADD71]], double* [[ARRAYIDX73]], align 8, !llvm.access.group !9
14769 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE74:%.*]]
14770 // CHECK13: omp.body.continue74:
14771 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC75:%.*]]
14772 // CHECK13: omp.inner.for.inc75:
14773 // CHECK13-NEXT: [[TMP52:%.*]] = load i32, i32* [[DOTOMP_IV60]], align 4, !llvm.access.group !9
14774 // CHECK13-NEXT: [[ADD76:%.*]] = add nsw i32 [[TMP52]], 1
14775 // CHECK13-NEXT: store i32 [[ADD76]], i32* [[DOTOMP_IV60]], align 4, !llvm.access.group !9
14776 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND62]], !llvm.loop [[LOOP10:![0-9]+]]
14777 // CHECK13: omp.inner.for.end77:
14778 // CHECK13-NEXT: [[TMP53:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4
14779 // CHECK13-NEXT: [[SUB78:%.*]] = sub nsw i32 [[TMP53]], 0
14780 // CHECK13-NEXT: [[DIV79:%.*]] = sdiv i32 [[SUB78]], 1
14781 // CHECK13-NEXT: [[MUL80:%.*]] = mul nsw i32 [[DIV79]], 1
14782 // CHECK13-NEXT: [[ADD81:%.*]] = add nsw i32 0, [[MUL80]]
14783 // CHECK13-NEXT: store i32 [[ADD81]], i32* [[I61]], align 4
14784 // CHECK13-NEXT: br label [[SIMD_IF_END82]]
14785 // CHECK13: simd.if.end82:
14786 // CHECK13-NEXT: [[TMP54:%.*]] = load i32, i32* [[N]], align 4
14787 // CHECK13-NEXT: store i32 [[TMP54]], i32* [[DOTCAPTURE_EXPR_84]], align 4
14788 // CHECK13-NEXT: [[TMP55:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_84]], align 4
14789 // CHECK13-NEXT: [[SUB86:%.*]] = sub nsw i32 [[TMP55]], 0
14790 // CHECK13-NEXT: [[DIV87:%.*]] = sdiv i32 [[SUB86]], 1
14791 // CHECK13-NEXT: [[SUB88:%.*]] = sub nsw i32 [[DIV87]], 1
14792 // CHECK13-NEXT: store i32 [[SUB88]], i32* [[DOTCAPTURE_EXPR_85]], align 4
14793 // CHECK13-NEXT: store i32 0, i32* [[DOTOMP_LB89]], align 4
14794 // CHECK13-NEXT: [[TMP56:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_85]], align 4
14795 // CHECK13-NEXT: store i32 [[TMP56]], i32* [[DOTOMP_UB90]], align 4
14796 // CHECK13-NEXT: store i32 0, i32* [[I91]], align 4
14797 // CHECK13-NEXT: [[TMP57:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_84]], align 4
14798 // CHECK13-NEXT: [[CMP92:%.*]] = icmp slt i32 0, [[TMP57]]
14799 // CHECK13-NEXT: br i1 [[CMP92]], label [[SIMD_IF_THEN93:%.*]], label [[SIMD_IF_END116:%.*]]
14800 // CHECK13: simd.if.then93:
14801 // CHECK13-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTOMP_LB89]], align 4
14802 // CHECK13-NEXT: store i32 [[TMP58]], i32* [[DOTOMP_IV94]], align 4
14803 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND96:%.*]]
14804 // CHECK13: omp.inner.for.cond96:
14805 // CHECK13-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTOMP_IV94]], align 4, !llvm.access.group !12
14806 // CHECK13-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTOMP_UB90]], align 4, !llvm.access.group !12
14807 // CHECK13-NEXT: [[CMP97:%.*]] = icmp sle i32 [[TMP59]], [[TMP60]]
14808 // CHECK13-NEXT: br i1 [[CMP97]], label [[OMP_INNER_FOR_BODY98:%.*]], label [[OMP_INNER_FOR_END111:%.*]]
14809 // CHECK13: omp.inner.for.body98:
14810 // CHECK13-NEXT: [[TMP61:%.*]] = load i32, i32* [[DOTOMP_IV94]], align 4, !llvm.access.group !12
14811 // CHECK13-NEXT: [[MUL99:%.*]] = mul nsw i32 [[TMP61]], 1
14812 // CHECK13-NEXT: [[ADD100:%.*]] = add nsw i32 0, [[MUL99]]
14813 // CHECK13-NEXT: store i32 [[ADD100]], i32* [[I95]], align 4, !llvm.access.group !12
14814 // CHECK13-NEXT: [[TMP62:%.*]] = load double*, double** [[B]], align 8, !llvm.access.group !12
14815 // CHECK13-NEXT: [[TMP63:%.*]] = load i32, i32* [[I95]], align 4, !llvm.access.group !12
14816 // CHECK13-NEXT: [[IDXPROM101:%.*]] = sext i32 [[TMP63]] to i64
14817 // CHECK13-NEXT: [[ARRAYIDX102:%.*]] = getelementptr inbounds double, double* [[TMP62]], i64 [[IDXPROM101]]
14818 // CHECK13-NEXT: [[TMP64:%.*]] = load double, double* [[ARRAYIDX102]], align 8, !llvm.access.group !12
14819 // CHECK13-NEXT: [[TMP65:%.*]] = load double*, double** [[C]], align 8, !llvm.access.group !12
14820 // CHECK13-NEXT: [[TMP66:%.*]] = load i32, i32* [[I95]], align 4, !llvm.access.group !12
14821 // CHECK13-NEXT: [[IDXPROM103:%.*]] = sext i32 [[TMP66]] to i64
14822 // CHECK13-NEXT: [[ARRAYIDX104:%.*]] = getelementptr inbounds double, double* [[TMP65]], i64 [[IDXPROM103]]
14823 // CHECK13-NEXT: [[TMP67:%.*]] = load double, double* [[ARRAYIDX104]], align 8, !llvm.access.group !12
14824 // CHECK13-NEXT: [[ADD105:%.*]] = fadd double [[TMP64]], [[TMP67]]
14825 // CHECK13-NEXT: [[TMP68:%.*]] = load double*, double** [[A]], align 8, !llvm.access.group !12
14826 // CHECK13-NEXT: [[TMP69:%.*]] = load i32, i32* [[I95]], align 4, !llvm.access.group !12
14827 // CHECK13-NEXT: [[IDXPROM106:%.*]] = sext i32 [[TMP69]] to i64
14828 // CHECK13-NEXT: [[ARRAYIDX107:%.*]] = getelementptr inbounds double, double* [[TMP68]], i64 [[IDXPROM106]]
14829 // CHECK13-NEXT: store double [[ADD105]], double* [[ARRAYIDX107]], align 8, !llvm.access.group !12
14830 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE108:%.*]]
14831 // CHECK13: omp.body.continue108:
14832 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC109:%.*]]
14833 // CHECK13: omp.inner.for.inc109:
14834 // CHECK13-NEXT: [[TMP70:%.*]] = load i32, i32* [[DOTOMP_IV94]], align 4, !llvm.access.group !12
14835 // CHECK13-NEXT: [[ADD110:%.*]] = add nsw i32 [[TMP70]], 1
14836 // CHECK13-NEXT: store i32 [[ADD110]], i32* [[DOTOMP_IV94]], align 4, !llvm.access.group !12
14837 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND96]], !llvm.loop [[LOOP13:![0-9]+]]
14838 // CHECK13: omp.inner.for.end111:
14839 // CHECK13-NEXT: [[TMP71:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_84]], align 4
14840 // CHECK13-NEXT: [[SUB112:%.*]] = sub nsw i32 [[TMP71]], 0
14841 // CHECK13-NEXT: [[DIV113:%.*]] = sdiv i32 [[SUB112]], 1
14842 // CHECK13-NEXT: [[MUL114:%.*]] = mul nsw i32 [[DIV113]], 1
14843 // CHECK13-NEXT: [[ADD115:%.*]] = add nsw i32 0, [[MUL114]]
14844 // CHECK13-NEXT: store i32 [[ADD115]], i32* [[I95]], align 4
14845 // CHECK13-NEXT: br label [[SIMD_IF_END116]]
14846 // CHECK13: simd.if.end116:
14847 // CHECK13-NEXT: [[TMP72:%.*]] = load i32, i32* [[CH]], align 4
14848 // CHECK13-NEXT: store i32 [[TMP72]], i32* [[DOTCAPTURE_EXPR_117]], align 4
14849 // CHECK13-NEXT: [[TMP73:%.*]] = load i32, i32* [[N]], align 4
14850 // CHECK13-NEXT: store i32 [[TMP73]], i32* [[DOTCAPTURE_EXPR_119]], align 4
14851 // CHECK13-NEXT: [[TMP74:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_119]], align 4
14852 // CHECK13-NEXT: [[SUB121:%.*]] = sub nsw i32 [[TMP74]], 0
14853 // CHECK13-NEXT: [[DIV122:%.*]] = sdiv i32 [[SUB121]], 1
14854 // CHECK13-NEXT: [[SUB123:%.*]] = sub nsw i32 [[DIV122]], 1
14855 // CHECK13-NEXT: store i32 [[SUB123]], i32* [[DOTCAPTURE_EXPR_120]], align 4
14856 // CHECK13-NEXT: store i32 0, i32* [[DOTOMP_LB124]], align 4
14857 // CHECK13-NEXT: [[TMP75:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_120]], align 4
14858 // CHECK13-NEXT: store i32 [[TMP75]], i32* [[DOTOMP_UB125]], align 4
14859 // CHECK13-NEXT: store i32 0, i32* [[I126]], align 4
14860 // CHECK13-NEXT: [[TMP76:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_119]], align 4
14861 // CHECK13-NEXT: [[CMP127:%.*]] = icmp slt i32 0, [[TMP76]]
14862 // CHECK13-NEXT: br i1 [[CMP127]], label [[SIMD_IF_THEN128:%.*]], label [[SIMD_IF_END151:%.*]]
14863 // CHECK13: simd.if.then128:
14864 // CHECK13-NEXT: [[TMP77:%.*]] = load i32, i32* [[DOTOMP_LB124]], align 4
14865 // CHECK13-NEXT: store i32 [[TMP77]], i32* [[DOTOMP_IV129]], align 4
14866 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND131:%.*]]
14867 // CHECK13: omp.inner.for.cond131:
14868 // CHECK13-NEXT: [[TMP78:%.*]] = load i32, i32* [[DOTOMP_IV129]], align 4, !llvm.access.group !15
14869 // CHECK13-NEXT: [[TMP79:%.*]] = load i32, i32* [[DOTOMP_UB125]], align 4, !llvm.access.group !15
14870 // CHECK13-NEXT: [[CMP132:%.*]] = icmp sle i32 [[TMP78]], [[TMP79]]
14871 // CHECK13-NEXT: br i1 [[CMP132]], label [[OMP_INNER_FOR_BODY133:%.*]], label [[OMP_INNER_FOR_END146:%.*]]
14872 // CHECK13: omp.inner.for.body133:
14873 // CHECK13-NEXT: [[TMP80:%.*]] = load i32, i32* [[DOTOMP_IV129]], align 4, !llvm.access.group !15
14874 // CHECK13-NEXT: [[MUL134:%.*]] = mul nsw i32 [[TMP80]], 1
14875 // CHECK13-NEXT: [[ADD135:%.*]] = add nsw i32 0, [[MUL134]]
14876 // CHECK13-NEXT: store i32 [[ADD135]], i32* [[I130]], align 4, !llvm.access.group !15
14877 // CHECK13-NEXT: [[TMP81:%.*]] = load double*, double** [[B]], align 8, !llvm.access.group !15
14878 // CHECK13-NEXT: [[TMP82:%.*]] = load i32, i32* [[I130]], align 4, !llvm.access.group !15
14879 // CHECK13-NEXT: [[IDXPROM136:%.*]] = sext i32 [[TMP82]] to i64
14880 // CHECK13-NEXT: [[ARRAYIDX137:%.*]] = getelementptr inbounds double, double* [[TMP81]], i64 [[IDXPROM136]]
14881 // CHECK13-NEXT: [[TMP83:%.*]] = load double, double* [[ARRAYIDX137]], align 8, !llvm.access.group !15
14882 // CHECK13-NEXT: [[TMP84:%.*]] = load double*, double** [[C]], align 8, !llvm.access.group !15
14883 // CHECK13-NEXT: [[TMP85:%.*]] = load i32, i32* [[I130]], align 4, !llvm.access.group !15
14884 // CHECK13-NEXT: [[IDXPROM138:%.*]] = sext i32 [[TMP85]] to i64
14885 // CHECK13-NEXT: [[ARRAYIDX139:%.*]] = getelementptr inbounds double, double* [[TMP84]], i64 [[IDXPROM138]]
14886 // CHECK13-NEXT: [[TMP86:%.*]] = load double, double* [[ARRAYIDX139]], align 8, !llvm.access.group !15
14887 // CHECK13-NEXT: [[ADD140:%.*]] = fadd double [[TMP83]], [[TMP86]]
14888 // CHECK13-NEXT: [[TMP87:%.*]] = load double*, double** [[A]], align 8, !llvm.access.group !15
14889 // CHECK13-NEXT: [[TMP88:%.*]] = load i32, i32* [[I130]], align 4, !llvm.access.group !15
14890 // CHECK13-NEXT: [[IDXPROM141:%.*]] = sext i32 [[TMP88]] to i64
14891 // CHECK13-NEXT: [[ARRAYIDX142:%.*]] = getelementptr inbounds double, double* [[TMP87]], i64 [[IDXPROM141]]
14892 // CHECK13-NEXT: store double [[ADD140]], double* [[ARRAYIDX142]], align 8, !llvm.access.group !15
14893 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE143:%.*]]
14894 // CHECK13: omp.body.continue143:
14895 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC144:%.*]]
14896 // CHECK13: omp.inner.for.inc144:
14897 // CHECK13-NEXT: [[TMP89:%.*]] = load i32, i32* [[DOTOMP_IV129]], align 4, !llvm.access.group !15
14898 // CHECK13-NEXT: [[ADD145:%.*]] = add nsw i32 [[TMP89]], 1
14899 // CHECK13-NEXT: store i32 [[ADD145]], i32* [[DOTOMP_IV129]], align 4, !llvm.access.group !15
14900 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND131]], !llvm.loop [[LOOP16:![0-9]+]]
14901 // CHECK13: omp.inner.for.end146:
14902 // CHECK13-NEXT: [[TMP90:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_119]], align 4
14903 // CHECK13-NEXT: [[SUB147:%.*]] = sub nsw i32 [[TMP90]], 0
14904 // CHECK13-NEXT: [[DIV148:%.*]] = sdiv i32 [[SUB147]], 1
14905 // CHECK13-NEXT: [[MUL149:%.*]] = mul nsw i32 [[DIV148]], 1
14906 // CHECK13-NEXT: [[ADD150:%.*]] = add nsw i32 0, [[MUL149]]
14907 // CHECK13-NEXT: store i32 [[ADD150]], i32* [[I130]], align 4
14908 // CHECK13-NEXT: br label [[SIMD_IF_END151]]
14909 // CHECK13: simd.if.end151:
14910 // CHECK13-NEXT: [[TMP91:%.*]] = load i32, i32* [[N]], align 4
14911 // CHECK13-NEXT: store i32 [[TMP91]], i32* [[DOTCAPTURE_EXPR_153]], align 4
14912 // CHECK13-NEXT: [[TMP92:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_153]], align 4
14913 // CHECK13-NEXT: [[SUB155:%.*]] = sub nsw i32 [[TMP92]], 0
14914 // CHECK13-NEXT: [[DIV156:%.*]] = sdiv i32 [[SUB155]], 1
14915 // CHECK13-NEXT: [[SUB157:%.*]] = sub nsw i32 [[DIV156]], 1
14916 // CHECK13-NEXT: store i32 [[SUB157]], i32* [[DOTCAPTURE_EXPR_154]], align 4
14917 // CHECK13-NEXT: store i32 0, i32* [[DOTOMP_LB158]], align 4
14918 // CHECK13-NEXT: [[TMP93:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_154]], align 4
14919 // CHECK13-NEXT: store i32 [[TMP93]], i32* [[DOTOMP_UB159]], align 4
14920 // CHECK13-NEXT: store i32 0, i32* [[I160]], align 4
14921 // CHECK13-NEXT: [[TMP94:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_153]], align 4
14922 // CHECK13-NEXT: [[CMP161:%.*]] = icmp slt i32 0, [[TMP94]]
14923 // CHECK13-NEXT: br i1 [[CMP161]], label [[SIMD_IF_THEN162:%.*]], label [[SIMD_IF_END185:%.*]]
14924 // CHECK13: simd.if.then162:
14925 // CHECK13-NEXT: [[TMP95:%.*]] = load i32, i32* [[DOTOMP_LB158]], align 4
14926 // CHECK13-NEXT: store i32 [[TMP95]], i32* [[DOTOMP_IV163]], align 4
14927 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND165:%.*]]
14928 // CHECK13: omp.inner.for.cond165:
14929 // CHECK13-NEXT: [[TMP96:%.*]] = load i32, i32* [[DOTOMP_IV163]], align 4, !llvm.access.group !18
14930 // CHECK13-NEXT: [[TMP97:%.*]] = load i32, i32* [[DOTOMP_UB159]], align 4, !llvm.access.group !18
14931 // CHECK13-NEXT: [[CMP166:%.*]] = icmp sle i32 [[TMP96]], [[TMP97]]
14932 // CHECK13-NEXT: br i1 [[CMP166]], label [[OMP_INNER_FOR_BODY167:%.*]], label [[OMP_INNER_FOR_END180:%.*]]
14933 // CHECK13: omp.inner.for.body167:
14934 // CHECK13-NEXT: [[TMP98:%.*]] = load i32, i32* [[DOTOMP_IV163]], align 4, !llvm.access.group !18
14935 // CHECK13-NEXT: [[MUL168:%.*]] = mul nsw i32 [[TMP98]], 1
14936 // CHECK13-NEXT: [[ADD169:%.*]] = add nsw i32 0, [[MUL168]]
14937 // CHECK13-NEXT: store i32 [[ADD169]], i32* [[I164]], align 4, !llvm.access.group !18
14938 // CHECK13-NEXT: [[TMP99:%.*]] = load double*, double** [[B]], align 8, !llvm.access.group !18
14939 // CHECK13-NEXT: [[TMP100:%.*]] = load i32, i32* [[I164]], align 4, !llvm.access.group !18
14940 // CHECK13-NEXT: [[IDXPROM170:%.*]] = sext i32 [[TMP100]] to i64
14941 // CHECK13-NEXT: [[ARRAYIDX171:%.*]] = getelementptr inbounds double, double* [[TMP99]], i64 [[IDXPROM170]]
14942 // CHECK13-NEXT: [[TMP101:%.*]] = load double, double* [[ARRAYIDX171]], align 8, !llvm.access.group !18
14943 // CHECK13-NEXT: [[TMP102:%.*]] = load double*, double** [[C]], align 8, !llvm.access.group !18
14944 // CHECK13-NEXT: [[TMP103:%.*]] = load i32, i32* [[I164]], align 4, !llvm.access.group !18
14945 // CHECK13-NEXT: [[IDXPROM172:%.*]] = sext i32 [[TMP103]] to i64
14946 // CHECK13-NEXT: [[ARRAYIDX173:%.*]] = getelementptr inbounds double, double* [[TMP102]], i64 [[IDXPROM172]]
14947 // CHECK13-NEXT: [[TMP104:%.*]] = load double, double* [[ARRAYIDX173]], align 8, !llvm.access.group !18
14948 // CHECK13-NEXT: [[ADD174:%.*]] = fadd double [[TMP101]], [[TMP104]]
14949 // CHECK13-NEXT: [[TMP105:%.*]] = load double*, double** [[A]], align 8, !llvm.access.group !18
14950 // CHECK13-NEXT: [[TMP106:%.*]] = load i32, i32* [[I164]], align 4, !llvm.access.group !18
14951 // CHECK13-NEXT: [[IDXPROM175:%.*]] = sext i32 [[TMP106]] to i64
14952 // CHECK13-NEXT: [[ARRAYIDX176:%.*]] = getelementptr inbounds double, double* [[TMP105]], i64 [[IDXPROM175]]
14953 // CHECK13-NEXT: store double [[ADD174]], double* [[ARRAYIDX176]], align 8, !llvm.access.group !18
14954 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE177:%.*]]
14955 // CHECK13: omp.body.continue177:
14956 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC178:%.*]]
14957 // CHECK13: omp.inner.for.inc178:
14958 // CHECK13-NEXT: [[TMP107:%.*]] = load i32, i32* [[DOTOMP_IV163]], align 4, !llvm.access.group !18
14959 // CHECK13-NEXT: [[ADD179:%.*]] = add nsw i32 [[TMP107]], 1
14960 // CHECK13-NEXT: store i32 [[ADD179]], i32* [[DOTOMP_IV163]], align 4, !llvm.access.group !18
14961 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND165]], !llvm.loop [[LOOP19:![0-9]+]]
14962 // CHECK13: omp.inner.for.end180:
14963 // CHECK13-NEXT: [[TMP108:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_153]], align 4
14964 // CHECK13-NEXT: [[SUB181:%.*]] = sub nsw i32 [[TMP108]], 0
14965 // CHECK13-NEXT: [[DIV182:%.*]] = sdiv i32 [[SUB181]], 1
14966 // CHECK13-NEXT: [[MUL183:%.*]] = mul nsw i32 [[DIV182]], 1
14967 // CHECK13-NEXT: [[ADD184:%.*]] = add nsw i32 0, [[MUL183]]
14968 // CHECK13-NEXT: store i32 [[ADD184]], i32* [[I164]], align 4
14969 // CHECK13-NEXT: br label [[SIMD_IF_END185]]
14970 // CHECK13: simd.if.end185:
14971 // CHECK13-NEXT: [[TMP109:%.*]] = load i32, i32* [[CH]], align 4
14972 // CHECK13-NEXT: store i32 [[TMP109]], i32* [[DOTCAPTURE_EXPR_186]], align 4
14973 // CHECK13-NEXT: [[TMP110:%.*]] = load i32, i32* [[N]], align 4
14974 // CHECK13-NEXT: store i32 [[TMP110]], i32* [[DOTCAPTURE_EXPR_188]], align 4
14975 // CHECK13-NEXT: [[TMP111:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_188]], align 4
14976 // CHECK13-NEXT: [[SUB190:%.*]] = sub nsw i32 [[TMP111]], 0
14977 // CHECK13-NEXT: [[DIV191:%.*]] = sdiv i32 [[SUB190]], 1
14978 // CHECK13-NEXT: [[SUB192:%.*]] = sub nsw i32 [[DIV191]], 1
14979 // CHECK13-NEXT: store i32 [[SUB192]], i32* [[DOTCAPTURE_EXPR_189]], align 4
14980 // CHECK13-NEXT: store i32 0, i32* [[DOTOMP_LB193]], align 4
14981 // CHECK13-NEXT: [[TMP112:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_189]], align 4
14982 // CHECK13-NEXT: store i32 [[TMP112]], i32* [[DOTOMP_UB194]], align 4
14983 // CHECK13-NEXT: store i32 0, i32* [[I195]], align 4
14984 // CHECK13-NEXT: [[TMP113:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_188]], align 4
14985 // CHECK13-NEXT: [[CMP196:%.*]] = icmp slt i32 0, [[TMP113]]
14986 // CHECK13-NEXT: br i1 [[CMP196]], label [[SIMD_IF_THEN197:%.*]], label [[SIMD_IF_END220:%.*]]
14987 // CHECK13: simd.if.then197:
14988 // CHECK13-NEXT: [[TMP114:%.*]] = load i32, i32* [[DOTOMP_LB193]], align 4
14989 // CHECK13-NEXT: store i32 [[TMP114]], i32* [[DOTOMP_IV198]], align 4
14990 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND200:%.*]]
14991 // CHECK13: omp.inner.for.cond200:
14992 // CHECK13-NEXT: [[TMP115:%.*]] = load i32, i32* [[DOTOMP_IV198]], align 4, !llvm.access.group !21
14993 // CHECK13-NEXT: [[TMP116:%.*]] = load i32, i32* [[DOTOMP_UB194]], align 4, !llvm.access.group !21
14994 // CHECK13-NEXT: [[CMP201:%.*]] = icmp sle i32 [[TMP115]], [[TMP116]]
14995 // CHECK13-NEXT: br i1 [[CMP201]], label [[OMP_INNER_FOR_BODY202:%.*]], label [[OMP_INNER_FOR_END215:%.*]]
14996 // CHECK13: omp.inner.for.body202:
14997 // CHECK13-NEXT: [[TMP117:%.*]] = load i32, i32* [[DOTOMP_IV198]], align 4, !llvm.access.group !21
14998 // CHECK13-NEXT: [[MUL203:%.*]] = mul nsw i32 [[TMP117]], 1
14999 // CHECK13-NEXT: [[ADD204:%.*]] = add nsw i32 0, [[MUL203]]
15000 // CHECK13-NEXT: store i32 [[ADD204]], i32* [[I199]], align 4, !llvm.access.group !21
15001 // CHECK13-NEXT: [[TMP118:%.*]] = load double*, double** [[B]], align 8, !llvm.access.group !21
15002 // CHECK13-NEXT: [[TMP119:%.*]] = load i32, i32* [[I199]], align 4, !llvm.access.group !21
15003 // CHECK13-NEXT: [[IDXPROM205:%.*]] = sext i32 [[TMP119]] to i64
15004 // CHECK13-NEXT: [[ARRAYIDX206:%.*]] = getelementptr inbounds double, double* [[TMP118]], i64 [[IDXPROM205]]
15005 // CHECK13-NEXT: [[TMP120:%.*]] = load double, double* [[ARRAYIDX206]], align 8, !llvm.access.group !21
15006 // CHECK13-NEXT: [[TMP121:%.*]] = load double*, double** [[C]], align 8, !llvm.access.group !21
15007 // CHECK13-NEXT: [[TMP122:%.*]] = load i32, i32* [[I199]], align 4, !llvm.access.group !21
15008 // CHECK13-NEXT: [[IDXPROM207:%.*]] = sext i32 [[TMP122]] to i64
15009 // CHECK13-NEXT: [[ARRAYIDX208:%.*]] = getelementptr inbounds double, double* [[TMP121]], i64 [[IDXPROM207]]
15010 // CHECK13-NEXT: [[TMP123:%.*]] = load double, double* [[ARRAYIDX208]], align 8, !llvm.access.group !21
15011 // CHECK13-NEXT: [[ADD209:%.*]] = fadd double [[TMP120]], [[TMP123]]
15012 // CHECK13-NEXT: [[TMP124:%.*]] = load double*, double** [[A]], align 8, !llvm.access.group !21
15013 // CHECK13-NEXT: [[TMP125:%.*]] = load i32, i32* [[I199]], align 4, !llvm.access.group !21
15014 // CHECK13-NEXT: [[IDXPROM210:%.*]] = sext i32 [[TMP125]] to i64
15015 // CHECK13-NEXT: [[ARRAYIDX211:%.*]] = getelementptr inbounds double, double* [[TMP124]], i64 [[IDXPROM210]]
15016 // CHECK13-NEXT: store double [[ADD209]], double* [[ARRAYIDX211]], align 8, !llvm.access.group !21
15017 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE212:%.*]]
15018 // CHECK13: omp.body.continue212:
15019 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC213:%.*]]
15020 // CHECK13: omp.inner.for.inc213:
15021 // CHECK13-NEXT: [[TMP126:%.*]] = load i32, i32* [[DOTOMP_IV198]], align 4, !llvm.access.group !21
15022 // CHECK13-NEXT: [[ADD214:%.*]] = add nsw i32 [[TMP126]], 1
15023 // CHECK13-NEXT: store i32 [[ADD214]], i32* [[DOTOMP_IV198]], align 4, !llvm.access.group !21
15024 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND200]], !llvm.loop [[LOOP22:![0-9]+]]
15025 // CHECK13: omp.inner.for.end215:
15026 // CHECK13-NEXT: [[TMP127:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_188]], align 4
15027 // CHECK13-NEXT: [[SUB216:%.*]] = sub nsw i32 [[TMP127]], 0
15028 // CHECK13-NEXT: [[DIV217:%.*]] = sdiv i32 [[SUB216]], 1
15029 // CHECK13-NEXT: [[MUL218:%.*]] = mul nsw i32 [[DIV217]], 1
15030 // CHECK13-NEXT: [[ADD219:%.*]] = add nsw i32 0, [[MUL218]]
15031 // CHECK13-NEXT: store i32 [[ADD219]], i32* [[I199]], align 4
15032 // CHECK13-NEXT: br label [[SIMD_IF_END220]]
15033 // CHECK13: simd.if.end220:
15034 // CHECK13-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiET_v()
15035 // CHECK13-NEXT: ret i32 [[CALL]]
15036 //
15037 //
15038 // CHECK13-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
15039 // CHECK13-SAME: () #[[ATTR1:[0-9]+]] comdat {
15040 // CHECK13-NEXT: entry:
15041 // CHECK13-NEXT: [[A:%.*]] = alloca i32*, align 8
15042 // CHECK13-NEXT: [[B:%.*]] = alloca i32*, align 8
15043 // CHECK13-NEXT: [[C:%.*]] = alloca i32*, align 8
15044 // CHECK13-NEXT: [[N:%.*]] = alloca i32, align 4
15045 // CHECK13-NEXT: [[CH:%.*]] = alloca i32, align 4
15046 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
15047 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
15048 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
15049 // CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
15050 // CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
15051 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
15052 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
15053 // CHECK13-NEXT: [[I3:%.*]] = alloca i32, align 4
15054 // CHECK13-NEXT: [[_TMP15:%.*]] = alloca i32, align 4
15055 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_16:%.*]] = alloca i32, align 4
15056 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4
15057 // CHECK13-NEXT: [[DOTOMP_LB21:%.*]] = alloca i32, align 4
15058 // CHECK13-NEXT: [[DOTOMP_UB22:%.*]] = alloca i32, align 4
15059 // CHECK13-NEXT: [[I23:%.*]] = alloca i32, align 4
15060 // CHECK13-NEXT: [[DOTOMP_IV26:%.*]] = alloca i32, align 4
15061 // CHECK13-NEXT: [[I27:%.*]] = alloca i32, align 4
15062 // CHECK13-NEXT: [[_TMP49:%.*]] = alloca i32, align 4
15063 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_50:%.*]] = alloca i32, align 4
15064 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_51:%.*]] = alloca i32, align 4
15065 // CHECK13-NEXT: [[DOTOMP_LB55:%.*]] = alloca i32, align 4
15066 // CHECK13-NEXT: [[DOTOMP_UB56:%.*]] = alloca i32, align 4
15067 // CHECK13-NEXT: [[I57:%.*]] = alloca i32, align 4
15068 // CHECK13-NEXT: [[DOTOMP_IV60:%.*]] = alloca i32, align 4
15069 // CHECK13-NEXT: [[I61:%.*]] = alloca i32, align 4
15070 // CHECK13-NEXT: [[_TMP83:%.*]] = alloca i32, align 4
15071 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_84:%.*]] = alloca i32, align 4
15072 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_85:%.*]] = alloca i32, align 4
15073 // CHECK13-NEXT: [[DOTOMP_LB89:%.*]] = alloca i32, align 4
15074 // CHECK13-NEXT: [[DOTOMP_UB90:%.*]] = alloca i32, align 4
15075 // CHECK13-NEXT: [[I91:%.*]] = alloca i32, align 4
15076 // CHECK13-NEXT: [[DOTOMP_IV94:%.*]] = alloca i32, align 4
15077 // CHECK13-NEXT: [[I95:%.*]] = alloca i32, align 4
15078 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_117:%.*]] = alloca i32, align 4
15079 // CHECK13-NEXT: [[_TMP118:%.*]] = alloca i32, align 4
15080 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_119:%.*]] = alloca i32, align 4
15081 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_120:%.*]] = alloca i32, align 4
15082 // CHECK13-NEXT: [[DOTOMP_LB124:%.*]] = alloca i32, align 4
15083 // CHECK13-NEXT: [[DOTOMP_UB125:%.*]] = alloca i32, align 4
15084 // CHECK13-NEXT: [[I126:%.*]] = alloca i32, align 4
15085 // CHECK13-NEXT: [[DOTOMP_IV129:%.*]] = alloca i32, align 4
15086 // CHECK13-NEXT: [[I130:%.*]] = alloca i32, align 4
15087 // CHECK13-NEXT: [[_TMP152:%.*]] = alloca i32, align 4
15088 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_153:%.*]] = alloca i32, align 4
15089 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_154:%.*]] = alloca i32, align 4
15090 // CHECK13-NEXT: [[DOTOMP_LB158:%.*]] = alloca i32, align 4
15091 // CHECK13-NEXT: [[DOTOMP_UB159:%.*]] = alloca i32, align 4
15092 // CHECK13-NEXT: [[I160:%.*]] = alloca i32, align 4
15093 // CHECK13-NEXT: [[DOTOMP_IV163:%.*]] = alloca i32, align 4
15094 // CHECK13-NEXT: [[I164:%.*]] = alloca i32, align 4
15095 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_186:%.*]] = alloca i32, align 4
15096 // CHECK13-NEXT: [[_TMP187:%.*]] = alloca i32, align 4
15097 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_188:%.*]] = alloca i32, align 4
15098 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_189:%.*]] = alloca i32, align 4
15099 // CHECK13-NEXT: [[DOTOMP_LB193:%.*]] = alloca i32, align 4
15100 // CHECK13-NEXT: [[DOTOMP_UB194:%.*]] = alloca i32, align 4
15101 // CHECK13-NEXT: [[I195:%.*]] = alloca i32, align 4
15102 // CHECK13-NEXT: [[DOTOMP_IV198:%.*]] = alloca i32, align 4
15103 // CHECK13-NEXT: [[I199:%.*]] = alloca i32, align 4
15104 // CHECK13-NEXT: store i32 10000, i32* [[N]], align 4
15105 // CHECK13-NEXT: store i32 100, i32* [[CH]], align 4
15106 // CHECK13-NEXT: [[TMP0:%.*]] = load i32, i32* [[N]], align 4
15107 // CHECK13-NEXT: store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
15108 // CHECK13-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15109 // CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], 0
15110 // CHECK13-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
15111 // CHECK13-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
15112 // CHECK13-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
15113 // CHECK13-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
15114 // CHECK13-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15115 // CHECK13-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
15116 // CHECK13-NEXT: store i32 0, i32* [[I]], align 4
15117 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15118 // CHECK13-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
15119 // CHECK13-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
15120 // CHECK13: simd.if.then:
15121 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
15122 // CHECK13-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
15123 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
15124 // CHECK13: omp.inner.for.cond:
15125 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
15126 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !24
15127 // CHECK13-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
15128 // CHECK13-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15129 // CHECK13: omp.inner.for.body:
15130 // CHECK13-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
15131 // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
15132 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
15133 // CHECK13-NEXT: store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !24
15134 // CHECK13-NEXT: [[TMP8:%.*]] = load i32*, i32** [[B]], align 8, !llvm.access.group !24
15135 // CHECK13-NEXT: [[TMP9:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !24
15136 // CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP9]] to i64
15137 // CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP8]], i64 [[IDXPROM]]
15138 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !24
15139 // CHECK13-NEXT: [[TMP11:%.*]] = load i32*, i32** [[C]], align 8, !llvm.access.group !24
15140 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !24
15141 // CHECK13-NEXT: [[IDXPROM5:%.*]] = sext i32 [[TMP12]] to i64
15142 // CHECK13-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[TMP11]], i64 [[IDXPROM5]]
15143 // CHECK13-NEXT: [[TMP13:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4, !llvm.access.group !24
15144 // CHECK13-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP10]], [[TMP13]]
15145 // CHECK13-NEXT: [[TMP14:%.*]] = load i32*, i32** [[A]], align 8, !llvm.access.group !24
15146 // CHECK13-NEXT: [[TMP15:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !24
15147 // CHECK13-NEXT: [[IDXPROM8:%.*]] = sext i32 [[TMP15]] to i64
15148 // CHECK13-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, i32* [[TMP14]], i64 [[IDXPROM8]]
15149 // CHECK13-NEXT: store i32 [[ADD7]], i32* [[ARRAYIDX9]], align 4, !llvm.access.group !24
15150 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
15151 // CHECK13: omp.body.continue:
15152 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
15153 // CHECK13: omp.inner.for.inc:
15154 // CHECK13-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
15155 // CHECK13-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP16]], 1
15156 // CHECK13-NEXT: store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
15157 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
15158 // CHECK13: omp.inner.for.end:
15159 // CHECK13-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15160 // CHECK13-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP17]], 0
15161 // CHECK13-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1
15162 // CHECK13-NEXT: [[MUL13:%.*]] = mul nsw i32 [[DIV12]], 1
15163 // CHECK13-NEXT: [[ADD14:%.*]] = add nsw i32 0, [[MUL13]]
15164 // CHECK13-NEXT: store i32 [[ADD14]], i32* [[I3]], align 4
15165 // CHECK13-NEXT: br label [[SIMD_IF_END]]
15166 // CHECK13: simd.if.end:
15167 // CHECK13-NEXT: [[TMP18:%.*]] = load i32, i32* [[N]], align 4
15168 // CHECK13-NEXT: store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR_16]], align 4
15169 // CHECK13-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4
15170 // CHECK13-NEXT: [[SUB18:%.*]] = sub nsw i32 [[TMP19]], 0
15171 // CHECK13-NEXT: [[DIV19:%.*]] = sdiv i32 [[SUB18]], 1
15172 // CHECK13-NEXT: [[SUB20:%.*]] = sub nsw i32 [[DIV19]], 1
15173 // CHECK13-NEXT: store i32 [[SUB20]], i32* [[DOTCAPTURE_EXPR_17]], align 4
15174 // CHECK13-NEXT: store i32 0, i32* [[DOTOMP_LB21]], align 4
15175 // CHECK13-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4
15176 // CHECK13-NEXT: store i32 [[TMP20]], i32* [[DOTOMP_UB22]], align 4
15177 // CHECK13-NEXT: store i32 0, i32* [[I23]], align 4
15178 // CHECK13-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4
15179 // CHECK13-NEXT: [[CMP24:%.*]] = icmp slt i32 0, [[TMP21]]
15180 // CHECK13-NEXT: br i1 [[CMP24]], label [[SIMD_IF_THEN25:%.*]], label [[SIMD_IF_END48:%.*]]
15181 // CHECK13: simd.if.then25:
15182 // CHECK13-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_LB21]], align 4
15183 // CHECK13-NEXT: store i32 [[TMP22]], i32* [[DOTOMP_IV26]], align 4
15184 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND28:%.*]]
15185 // CHECK13: omp.inner.for.cond28:
15186 // CHECK13-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV26]], align 4, !llvm.access.group !27
15187 // CHECK13-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_UB22]], align 4, !llvm.access.group !27
15188 // CHECK13-NEXT: [[CMP29:%.*]] = icmp sle i32 [[TMP23]], [[TMP24]]
15189 // CHECK13-NEXT: br i1 [[CMP29]], label [[OMP_INNER_FOR_BODY30:%.*]], label [[OMP_INNER_FOR_END43:%.*]]
15190 // CHECK13: omp.inner.for.body30:
15191 // CHECK13-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV26]], align 4, !llvm.access.group !27
15192 // CHECK13-NEXT: [[MUL31:%.*]] = mul nsw i32 [[TMP25]], 1
15193 // CHECK13-NEXT: [[ADD32:%.*]] = add nsw i32 0, [[MUL31]]
15194 // CHECK13-NEXT: store i32 [[ADD32]], i32* [[I27]], align 4, !llvm.access.group !27
15195 // CHECK13-NEXT: [[TMP26:%.*]] = load i32*, i32** [[B]], align 8, !llvm.access.group !27
15196 // CHECK13-NEXT: [[TMP27:%.*]] = load i32, i32* [[I27]], align 4, !llvm.access.group !27
15197 // CHECK13-NEXT: [[IDXPROM33:%.*]] = sext i32 [[TMP27]] to i64
15198 // CHECK13-NEXT: [[ARRAYIDX34:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i64 [[IDXPROM33]]
15199 // CHECK13-NEXT: [[TMP28:%.*]] = load i32, i32* [[ARRAYIDX34]], align 4, !llvm.access.group !27
15200 // CHECK13-NEXT: [[TMP29:%.*]] = load i32*, i32** [[C]], align 8, !llvm.access.group !27
15201 // CHECK13-NEXT: [[TMP30:%.*]] = load i32, i32* [[I27]], align 4, !llvm.access.group !27
15202 // CHECK13-NEXT: [[IDXPROM35:%.*]] = sext i32 [[TMP30]] to i64
15203 // CHECK13-NEXT: [[ARRAYIDX36:%.*]] = getelementptr inbounds i32, i32* [[TMP29]], i64 [[IDXPROM35]]
15204 // CHECK13-NEXT: [[TMP31:%.*]] = load i32, i32* [[ARRAYIDX36]], align 4, !llvm.access.group !27
15205 // CHECK13-NEXT: [[ADD37:%.*]] = add nsw i32 [[TMP28]], [[TMP31]]
15206 // CHECK13-NEXT: [[TMP32:%.*]] = load i32*, i32** [[A]], align 8, !llvm.access.group !27
15207 // CHECK13-NEXT: [[TMP33:%.*]] = load i32, i32* [[I27]], align 4, !llvm.access.group !27
15208 // CHECK13-NEXT: [[IDXPROM38:%.*]] = sext i32 [[TMP33]] to i64
15209 // CHECK13-NEXT: [[ARRAYIDX39:%.*]] = getelementptr inbounds i32, i32* [[TMP32]], i64 [[IDXPROM38]]
15210 // CHECK13-NEXT: store i32 [[ADD37]], i32* [[ARRAYIDX39]], align 4, !llvm.access.group !27
15211 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE40:%.*]]
15212 // CHECK13: omp.body.continue40:
15213 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC41:%.*]]
15214 // CHECK13: omp.inner.for.inc41:
15215 // CHECK13-NEXT: [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV26]], align 4, !llvm.access.group !27
15216 // CHECK13-NEXT: [[ADD42:%.*]] = add nsw i32 [[TMP34]], 1
15217 // CHECK13-NEXT: store i32 [[ADD42]], i32* [[DOTOMP_IV26]], align 4, !llvm.access.group !27
15218 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND28]], !llvm.loop [[LOOP28:![0-9]+]]
15219 // CHECK13: omp.inner.for.end43:
15220 // CHECK13-NEXT: [[TMP35:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4
15221 // CHECK13-NEXT: [[SUB44:%.*]] = sub nsw i32 [[TMP35]], 0
15222 // CHECK13-NEXT: [[DIV45:%.*]] = sdiv i32 [[SUB44]], 1
15223 // CHECK13-NEXT: [[MUL46:%.*]] = mul nsw i32 [[DIV45]], 1
15224 // CHECK13-NEXT: [[ADD47:%.*]] = add nsw i32 0, [[MUL46]]
15225 // CHECK13-NEXT: store i32 [[ADD47]], i32* [[I27]], align 4
15226 // CHECK13-NEXT: br label [[SIMD_IF_END48]]
15227 // CHECK13: simd.if.end48:
15228 // CHECK13-NEXT: [[TMP36:%.*]] = load i32, i32* [[N]], align 4
15229 // CHECK13-NEXT: store i32 [[TMP36]], i32* [[DOTCAPTURE_EXPR_50]], align 4
15230 // CHECK13-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4
15231 // CHECK13-NEXT: [[SUB52:%.*]] = sub nsw i32 [[TMP37]], 0
15232 // CHECK13-NEXT: [[DIV53:%.*]] = sdiv i32 [[SUB52]], 1
15233 // CHECK13-NEXT: [[SUB54:%.*]] = sub nsw i32 [[DIV53]], 1
15234 // CHECK13-NEXT: store i32 [[SUB54]], i32* [[DOTCAPTURE_EXPR_51]], align 4
15235 // CHECK13-NEXT: store i32 0, i32* [[DOTOMP_LB55]], align 4
15236 // CHECK13-NEXT: [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_51]], align 4
15237 // CHECK13-NEXT: store i32 [[TMP38]], i32* [[DOTOMP_UB56]], align 4
15238 // CHECK13-NEXT: store i32 0, i32* [[I57]], align 4
15239 // CHECK13-NEXT: [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4
15240 // CHECK13-NEXT: [[CMP58:%.*]] = icmp slt i32 0, [[TMP39]]
15241 // CHECK13-NEXT: br i1 [[CMP58]], label [[SIMD_IF_THEN59:%.*]], label [[SIMD_IF_END82:%.*]]
15242 // CHECK13: simd.if.then59:
15243 // CHECK13-NEXT: [[TMP40:%.*]] = load i32, i32* [[DOTOMP_LB55]], align 4
15244 // CHECK13-NEXT: store i32 [[TMP40]], i32* [[DOTOMP_IV60]], align 4
15245 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND62:%.*]]
15246 // CHECK13: omp.inner.for.cond62:
15247 // CHECK13-NEXT: [[TMP41:%.*]] = load i32, i32* [[DOTOMP_IV60]], align 4, !llvm.access.group !30
15248 // CHECK13-NEXT: [[TMP42:%.*]] = load i32, i32* [[DOTOMP_UB56]], align 4, !llvm.access.group !30
15249 // CHECK13-NEXT: [[CMP63:%.*]] = icmp sle i32 [[TMP41]], [[TMP42]]
15250 // CHECK13-NEXT: br i1 [[CMP63]], label [[OMP_INNER_FOR_BODY64:%.*]], label [[OMP_INNER_FOR_END77:%.*]]
15251 // CHECK13: omp.inner.for.body64:
15252 // CHECK13-NEXT: [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV60]], align 4, !llvm.access.group !30
15253 // CHECK13-NEXT: [[MUL65:%.*]] = mul nsw i32 [[TMP43]], 1
15254 // CHECK13-NEXT: [[ADD66:%.*]] = add nsw i32 0, [[MUL65]]
15255 // CHECK13-NEXT: store i32 [[ADD66]], i32* [[I61]], align 4, !llvm.access.group !30
15256 // CHECK13-NEXT: [[TMP44:%.*]] = load i32*, i32** [[B]], align 8, !llvm.access.group !30
15257 // CHECK13-NEXT: [[TMP45:%.*]] = load i32, i32* [[I61]], align 4, !llvm.access.group !30
15258 // CHECK13-NEXT: [[IDXPROM67:%.*]] = sext i32 [[TMP45]] to i64
15259 // CHECK13-NEXT: [[ARRAYIDX68:%.*]] = getelementptr inbounds i32, i32* [[TMP44]], i64 [[IDXPROM67]]
15260 // CHECK13-NEXT: [[TMP46:%.*]] = load i32, i32* [[ARRAYIDX68]], align 4, !llvm.access.group !30
15261 // CHECK13-NEXT: [[TMP47:%.*]] = load i32*, i32** [[C]], align 8, !llvm.access.group !30
15262 // CHECK13-NEXT: [[TMP48:%.*]] = load i32, i32* [[I61]], align 4, !llvm.access.group !30
15263 // CHECK13-NEXT: [[IDXPROM69:%.*]] = sext i32 [[TMP48]] to i64
15264 // CHECK13-NEXT: [[ARRAYIDX70:%.*]] = getelementptr inbounds i32, i32* [[TMP47]], i64 [[IDXPROM69]]
15265 // CHECK13-NEXT: [[TMP49:%.*]] = load i32, i32* [[ARRAYIDX70]], align 4, !llvm.access.group !30
15266 // CHECK13-NEXT: [[ADD71:%.*]] = add nsw i32 [[TMP46]], [[TMP49]]
15267 // CHECK13-NEXT: [[TMP50:%.*]] = load i32*, i32** [[A]], align 8, !llvm.access.group !30
15268 // CHECK13-NEXT: [[TMP51:%.*]] = load i32, i32* [[I61]], align 4, !llvm.access.group !30
15269 // CHECK13-NEXT: [[IDXPROM72:%.*]] = sext i32 [[TMP51]] to i64
15270 // CHECK13-NEXT: [[ARRAYIDX73:%.*]] = getelementptr inbounds i32, i32* [[TMP50]], i64 [[IDXPROM72]]
15271 // CHECK13-NEXT: store i32 [[ADD71]], i32* [[ARRAYIDX73]], align 4, !llvm.access.group !30
15272 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE74:%.*]]
15273 // CHECK13: omp.body.continue74:
15274 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC75:%.*]]
15275 // CHECK13: omp.inner.for.inc75:
15276 // CHECK13-NEXT: [[TMP52:%.*]] = load i32, i32* [[DOTOMP_IV60]], align 4, !llvm.access.group !30
15277 // CHECK13-NEXT: [[ADD76:%.*]] = add nsw i32 [[TMP52]], 1
15278 // CHECK13-NEXT: store i32 [[ADD76]], i32* [[DOTOMP_IV60]], align 4, !llvm.access.group !30
15279 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND62]], !llvm.loop [[LOOP31:![0-9]+]]
15280 // CHECK13: omp.inner.for.end77:
15281 // CHECK13-NEXT: [[TMP53:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4
15282 // CHECK13-NEXT: [[SUB78:%.*]] = sub nsw i32 [[TMP53]], 0
15283 // CHECK13-NEXT: [[DIV79:%.*]] = sdiv i32 [[SUB78]], 1
15284 // CHECK13-NEXT: [[MUL80:%.*]] = mul nsw i32 [[DIV79]], 1
15285 // CHECK13-NEXT: [[ADD81:%.*]] = add nsw i32 0, [[MUL80]]
15286 // CHECK13-NEXT: store i32 [[ADD81]], i32* [[I61]], align 4
15287 // CHECK13-NEXT: br label [[SIMD_IF_END82]]
15288 // CHECK13: simd.if.end82:
15289 // CHECK13-NEXT: [[TMP54:%.*]] = load i32, i32* [[N]], align 4
15290 // CHECK13-NEXT: store i32 [[TMP54]], i32* [[DOTCAPTURE_EXPR_84]], align 4
15291 // CHECK13-NEXT: [[TMP55:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_84]], align 4
15292 // CHECK13-NEXT: [[SUB86:%.*]] = sub nsw i32 [[TMP55]], 0
15293 // CHECK13-NEXT: [[DIV87:%.*]] = sdiv i32 [[SUB86]], 1
15294 // CHECK13-NEXT: [[SUB88:%.*]] = sub nsw i32 [[DIV87]], 1
15295 // CHECK13-NEXT: store i32 [[SUB88]], i32* [[DOTCAPTURE_EXPR_85]], align 4
15296 // CHECK13-NEXT: store i32 0, i32* [[DOTOMP_LB89]], align 4
15297 // CHECK13-NEXT: [[TMP56:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_85]], align 4
15298 // CHECK13-NEXT: store i32 [[TMP56]], i32* [[DOTOMP_UB90]], align 4
15299 // CHECK13-NEXT: store i32 0, i32* [[I91]], align 4
15300 // CHECK13-NEXT: [[TMP57:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_84]], align 4
15301 // CHECK13-NEXT: [[CMP92:%.*]] = icmp slt i32 0, [[TMP57]]
15302 // CHECK13-NEXT: br i1 [[CMP92]], label [[SIMD_IF_THEN93:%.*]], label [[SIMD_IF_END116:%.*]]
15303 // CHECK13: simd.if.then93:
15304 // CHECK13-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTOMP_LB89]], align 4
15305 // CHECK13-NEXT: store i32 [[TMP58]], i32* [[DOTOMP_IV94]], align 4
15306 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND96:%.*]]
15307 // CHECK13: omp.inner.for.cond96:
15308 // CHECK13-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTOMP_IV94]], align 4, !llvm.access.group !33
15309 // CHECK13-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTOMP_UB90]], align 4, !llvm.access.group !33
15310 // CHECK13-NEXT: [[CMP97:%.*]] = icmp sle i32 [[TMP59]], [[TMP60]]
15311 // CHECK13-NEXT: br i1 [[CMP97]], label [[OMP_INNER_FOR_BODY98:%.*]], label [[OMP_INNER_FOR_END111:%.*]]
15312 // CHECK13: omp.inner.for.body98:
15313 // CHECK13-NEXT: [[TMP61:%.*]] = load i32, i32* [[DOTOMP_IV94]], align 4, !llvm.access.group !33
15314 // CHECK13-NEXT: [[MUL99:%.*]] = mul nsw i32 [[TMP61]], 1
15315 // CHECK13-NEXT: [[ADD100:%.*]] = add nsw i32 0, [[MUL99]]
15316 // CHECK13-NEXT: store i32 [[ADD100]], i32* [[I95]], align 4, !llvm.access.group !33
15317 // CHECK13-NEXT: [[TMP62:%.*]] = load i32*, i32** [[B]], align 8, !llvm.access.group !33
15318 // CHECK13-NEXT: [[TMP63:%.*]] = load i32, i32* [[I95]], align 4, !llvm.access.group !33
15319 // CHECK13-NEXT: [[IDXPROM101:%.*]] = sext i32 [[TMP63]] to i64
15320 // CHECK13-NEXT: [[ARRAYIDX102:%.*]] = getelementptr inbounds i32, i32* [[TMP62]], i64 [[IDXPROM101]]
15321 // CHECK13-NEXT: [[TMP64:%.*]] = load i32, i32* [[ARRAYIDX102]], align 4, !llvm.access.group !33
15322 // CHECK13-NEXT: [[TMP65:%.*]] = load i32*, i32** [[C]], align 8, !llvm.access.group !33
15323 // CHECK13-NEXT: [[TMP66:%.*]] = load i32, i32* [[I95]], align 4, !llvm.access.group !33
15324 // CHECK13-NEXT: [[IDXPROM103:%.*]] = sext i32 [[TMP66]] to i64
15325 // CHECK13-NEXT: [[ARRAYIDX104:%.*]] = getelementptr inbounds i32, i32* [[TMP65]], i64 [[IDXPROM103]]
15326 // CHECK13-NEXT: [[TMP67:%.*]] = load i32, i32* [[ARRAYIDX104]], align 4, !llvm.access.group !33
15327 // CHECK13-NEXT: [[ADD105:%.*]] = add nsw i32 [[TMP64]], [[TMP67]]
15328 // CHECK13-NEXT: [[TMP68:%.*]] = load i32*, i32** [[A]], align 8, !llvm.access.group !33
15329 // CHECK13-NEXT: [[TMP69:%.*]] = load i32, i32* [[I95]], align 4, !llvm.access.group !33
15330 // CHECK13-NEXT: [[IDXPROM106:%.*]] = sext i32 [[TMP69]] to i64
15331 // CHECK13-NEXT: [[ARRAYIDX107:%.*]] = getelementptr inbounds i32, i32* [[TMP68]], i64 [[IDXPROM106]]
15332 // CHECK13-NEXT: store i32 [[ADD105]], i32* [[ARRAYIDX107]], align 4, !llvm.access.group !33
15333 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE108:%.*]]
15334 // CHECK13: omp.body.continue108:
15335 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC109:%.*]]
15336 // CHECK13: omp.inner.for.inc109:
15337 // CHECK13-NEXT: [[TMP70:%.*]] = load i32, i32* [[DOTOMP_IV94]], align 4, !llvm.access.group !33
15338 // CHECK13-NEXT: [[ADD110:%.*]] = add nsw i32 [[TMP70]], 1
15339 // CHECK13-NEXT: store i32 [[ADD110]], i32* [[DOTOMP_IV94]], align 4, !llvm.access.group !33
15340 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND96]], !llvm.loop [[LOOP34:![0-9]+]]
15341 // CHECK13: omp.inner.for.end111:
15342 // CHECK13-NEXT: [[TMP71:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_84]], align 4
15343 // CHECK13-NEXT: [[SUB112:%.*]] = sub nsw i32 [[TMP71]], 0
15344 // CHECK13-NEXT: [[DIV113:%.*]] = sdiv i32 [[SUB112]], 1
15345 // CHECK13-NEXT: [[MUL114:%.*]] = mul nsw i32 [[DIV113]], 1
15346 // CHECK13-NEXT: [[ADD115:%.*]] = add nsw i32 0, [[MUL114]]
15347 // CHECK13-NEXT: store i32 [[ADD115]], i32* [[I95]], align 4
15348 // CHECK13-NEXT: br label [[SIMD_IF_END116]]
15349 // CHECK13: simd.if.end116:
15350 // CHECK13-NEXT: [[TMP72:%.*]] = load i32, i32* [[CH]], align 4
15351 // CHECK13-NEXT: store i32 [[TMP72]], i32* [[DOTCAPTURE_EXPR_117]], align 4
15352 // CHECK13-NEXT: [[TMP73:%.*]] = load i32, i32* [[N]], align 4
15353 // CHECK13-NEXT: store i32 [[TMP73]], i32* [[DOTCAPTURE_EXPR_119]], align 4
15354 // CHECK13-NEXT: [[TMP74:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_119]], align 4
15355 // CHECK13-NEXT: [[SUB121:%.*]] = sub nsw i32 [[TMP74]], 0
15356 // CHECK13-NEXT: [[DIV122:%.*]] = sdiv i32 [[SUB121]], 1
15357 // CHECK13-NEXT: [[SUB123:%.*]] = sub nsw i32 [[DIV122]], 1
15358 // CHECK13-NEXT: store i32 [[SUB123]], i32* [[DOTCAPTURE_EXPR_120]], align 4
15359 // CHECK13-NEXT: store i32 0, i32* [[DOTOMP_LB124]], align 4
15360 // CHECK13-NEXT: [[TMP75:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_120]], align 4
15361 // CHECK13-NEXT: store i32 [[TMP75]], i32* [[DOTOMP_UB125]], align 4
15362 // CHECK13-NEXT: store i32 0, i32* [[I126]], align 4
15363 // CHECK13-NEXT: [[TMP76:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_119]], align 4
15364 // CHECK13-NEXT: [[CMP127:%.*]] = icmp slt i32 0, [[TMP76]]
15365 // CHECK13-NEXT: br i1 [[CMP127]], label [[SIMD_IF_THEN128:%.*]], label [[SIMD_IF_END151:%.*]]
15366 // CHECK13: simd.if.then128:
15367 // CHECK13-NEXT: [[TMP77:%.*]] = load i32, i32* [[DOTOMP_LB124]], align 4
15368 // CHECK13-NEXT: store i32 [[TMP77]], i32* [[DOTOMP_IV129]], align 4
15369 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND131:%.*]]
15370 // CHECK13: omp.inner.for.cond131:
15371 // CHECK13-NEXT: [[TMP78:%.*]] = load i32, i32* [[DOTOMP_IV129]], align 4, !llvm.access.group !36
15372 // CHECK13-NEXT: [[TMP79:%.*]] = load i32, i32* [[DOTOMP_UB125]], align 4, !llvm.access.group !36
15373 // CHECK13-NEXT: [[CMP132:%.*]] = icmp sle i32 [[TMP78]], [[TMP79]]
15374 // CHECK13-NEXT: br i1 [[CMP132]], label [[OMP_INNER_FOR_BODY133:%.*]], label [[OMP_INNER_FOR_END146:%.*]]
15375 // CHECK13: omp.inner.for.body133:
15376 // CHECK13-NEXT: [[TMP80:%.*]] = load i32, i32* [[DOTOMP_IV129]], align 4, !llvm.access.group !36
15377 // CHECK13-NEXT: [[MUL134:%.*]] = mul nsw i32 [[TMP80]], 1
15378 // CHECK13-NEXT: [[ADD135:%.*]] = add nsw i32 0, [[MUL134]]
15379 // CHECK13-NEXT: store i32 [[ADD135]], i32* [[I130]], align 4, !llvm.access.group !36
15380 // CHECK13-NEXT: [[TMP81:%.*]] = load i32*, i32** [[B]], align 8, !llvm.access.group !36
15381 // CHECK13-NEXT: [[TMP82:%.*]] = load i32, i32* [[I130]], align 4, !llvm.access.group !36
15382 // CHECK13-NEXT: [[IDXPROM136:%.*]] = sext i32 [[TMP82]] to i64
15383 // CHECK13-NEXT: [[ARRAYIDX137:%.*]] = getelementptr inbounds i32, i32* [[TMP81]], i64 [[IDXPROM136]]
15384 // CHECK13-NEXT: [[TMP83:%.*]] = load i32, i32* [[ARRAYIDX137]], align 4, !llvm.access.group !36
15385 // CHECK13-NEXT: [[TMP84:%.*]] = load i32*, i32** [[C]], align 8, !llvm.access.group !36
15386 // CHECK13-NEXT: [[TMP85:%.*]] = load i32, i32* [[I130]], align 4, !llvm.access.group !36
15387 // CHECK13-NEXT: [[IDXPROM138:%.*]] = sext i32 [[TMP85]] to i64
15388 // CHECK13-NEXT: [[ARRAYIDX139:%.*]] = getelementptr inbounds i32, i32* [[TMP84]], i64 [[IDXPROM138]]
15389 // CHECK13-NEXT: [[TMP86:%.*]] = load i32, i32* [[ARRAYIDX139]], align 4, !llvm.access.group !36
15390 // CHECK13-NEXT: [[ADD140:%.*]] = add nsw i32 [[TMP83]], [[TMP86]]
15391 // CHECK13-NEXT: [[TMP87:%.*]] = load i32*, i32** [[A]], align 8, !llvm.access.group !36
15392 // CHECK13-NEXT: [[TMP88:%.*]] = load i32, i32* [[I130]], align 4, !llvm.access.group !36
15393 // CHECK13-NEXT: [[IDXPROM141:%.*]] = sext i32 [[TMP88]] to i64
15394 // CHECK13-NEXT: [[ARRAYIDX142:%.*]] = getelementptr inbounds i32, i32* [[TMP87]], i64 [[IDXPROM141]]
15395 // CHECK13-NEXT: store i32 [[ADD140]], i32* [[ARRAYIDX142]], align 4, !llvm.access.group !36
15396 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE143:%.*]]
15397 // CHECK13: omp.body.continue143:
15398 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC144:%.*]]
15399 // CHECK13: omp.inner.for.inc144:
15400 // CHECK13-NEXT: [[TMP89:%.*]] = load i32, i32* [[DOTOMP_IV129]], align 4, !llvm.access.group !36
15401 // CHECK13-NEXT: [[ADD145:%.*]] = add nsw i32 [[TMP89]], 1
15402 // CHECK13-NEXT: store i32 [[ADD145]], i32* [[DOTOMP_IV129]], align 4, !llvm.access.group !36
15403 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND131]], !llvm.loop [[LOOP37:![0-9]+]]
15404 // CHECK13: omp.inner.for.end146:
15405 // CHECK13-NEXT: [[TMP90:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_119]], align 4
15406 // CHECK13-NEXT: [[SUB147:%.*]] = sub nsw i32 [[TMP90]], 0
15407 // CHECK13-NEXT: [[DIV148:%.*]] = sdiv i32 [[SUB147]], 1
15408 // CHECK13-NEXT: [[MUL149:%.*]] = mul nsw i32 [[DIV148]], 1
15409 // CHECK13-NEXT: [[ADD150:%.*]] = add nsw i32 0, [[MUL149]]
15410 // CHECK13-NEXT: store i32 [[ADD150]], i32* [[I130]], align 4
15411 // CHECK13-NEXT: br label [[SIMD_IF_END151]]
15412 // CHECK13: simd.if.end151:
15413 // CHECK13-NEXT: [[TMP91:%.*]] = load i32, i32* [[N]], align 4
15414 // CHECK13-NEXT: store i32 [[TMP91]], i32* [[DOTCAPTURE_EXPR_153]], align 4
15415 // CHECK13-NEXT: [[TMP92:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_153]], align 4
15416 // CHECK13-NEXT: [[SUB155:%.*]] = sub nsw i32 [[TMP92]], 0
15417 // CHECK13-NEXT: [[DIV156:%.*]] = sdiv i32 [[SUB155]], 1
15418 // CHECK13-NEXT: [[SUB157:%.*]] = sub nsw i32 [[DIV156]], 1
15419 // CHECK13-NEXT: store i32 [[SUB157]], i32* [[DOTCAPTURE_EXPR_154]], align 4
15420 // CHECK13-NEXT: store i32 0, i32* [[DOTOMP_LB158]], align 4
15421 // CHECK13-NEXT: [[TMP93:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_154]], align 4
15422 // CHECK13-NEXT: store i32 [[TMP93]], i32* [[DOTOMP_UB159]], align 4
15423 // CHECK13-NEXT: store i32 0, i32* [[I160]], align 4
15424 // CHECK13-NEXT: [[TMP94:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_153]], align 4
15425 // CHECK13-NEXT: [[CMP161:%.*]] = icmp slt i32 0, [[TMP94]]
15426 // CHECK13-NEXT: br i1 [[CMP161]], label [[SIMD_IF_THEN162:%.*]], label [[SIMD_IF_END185:%.*]]
15427 // CHECK13: simd.if.then162:
15428 // CHECK13-NEXT: [[TMP95:%.*]] = load i32, i32* [[DOTOMP_LB158]], align 4
15429 // CHECK13-NEXT: store i32 [[TMP95]], i32* [[DOTOMP_IV163]], align 4
15430 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND165:%.*]]
15431 // CHECK13: omp.inner.for.cond165:
15432 // CHECK13-NEXT: [[TMP96:%.*]] = load i32, i32* [[DOTOMP_IV163]], align 4, !llvm.access.group !39
15433 // CHECK13-NEXT: [[TMP97:%.*]] = load i32, i32* [[DOTOMP_UB159]], align 4, !llvm.access.group !39
15434 // CHECK13-NEXT: [[CMP166:%.*]] = icmp sle i32 [[TMP96]], [[TMP97]]
15435 // CHECK13-NEXT: br i1 [[CMP166]], label [[OMP_INNER_FOR_BODY167:%.*]], label [[OMP_INNER_FOR_END180:%.*]]
15436 // CHECK13: omp.inner.for.body167:
15437 // CHECK13-NEXT: [[TMP98:%.*]] = load i32, i32* [[DOTOMP_IV163]], align 4, !llvm.access.group !39
15438 // CHECK13-NEXT: [[MUL168:%.*]] = mul nsw i32 [[TMP98]], 1
15439 // CHECK13-NEXT: [[ADD169:%.*]] = add nsw i32 0, [[MUL168]]
15440 // CHECK13-NEXT: store i32 [[ADD169]], i32* [[I164]], align 4, !llvm.access.group !39
15441 // CHECK13-NEXT: [[TMP99:%.*]] = load i32*, i32** [[B]], align 8, !llvm.access.group !39
15442 // CHECK13-NEXT: [[TMP100:%.*]] = load i32, i32* [[I164]], align 4, !llvm.access.group !39
15443 // CHECK13-NEXT: [[IDXPROM170:%.*]] = sext i32 [[TMP100]] to i64
15444 // CHECK13-NEXT: [[ARRAYIDX171:%.*]] = getelementptr inbounds i32, i32* [[TMP99]], i64 [[IDXPROM170]]
15445 // CHECK13-NEXT: [[TMP101:%.*]] = load i32, i32* [[ARRAYIDX171]], align 4, !llvm.access.group !39
15446 // CHECK13-NEXT: [[TMP102:%.*]] = load i32*, i32** [[C]], align 8, !llvm.access.group !39
15447 // CHECK13-NEXT: [[TMP103:%.*]] = load i32, i32* [[I164]], align 4, !llvm.access.group !39
15448 // CHECK13-NEXT: [[IDXPROM172:%.*]] = sext i32 [[TMP103]] to i64
15449 // CHECK13-NEXT: [[ARRAYIDX173:%.*]] = getelementptr inbounds i32, i32* [[TMP102]], i64 [[IDXPROM172]]
15450 // CHECK13-NEXT: [[TMP104:%.*]] = load i32, i32* [[ARRAYIDX173]], align 4, !llvm.access.group !39
15451 // CHECK13-NEXT: [[ADD174:%.*]] = add nsw i32 [[TMP101]], [[TMP104]]
15452 // CHECK13-NEXT: [[TMP105:%.*]] = load i32*, i32** [[A]], align 8, !llvm.access.group !39
15453 // CHECK13-NEXT: [[TMP106:%.*]] = load i32, i32* [[I164]], align 4, !llvm.access.group !39
15454 // CHECK13-NEXT: [[IDXPROM175:%.*]] = sext i32 [[TMP106]] to i64
15455 // CHECK13-NEXT: [[ARRAYIDX176:%.*]] = getelementptr inbounds i32, i32* [[TMP105]], i64 [[IDXPROM175]]
15456 // CHECK13-NEXT: store i32 [[ADD174]], i32* [[ARRAYIDX176]], align 4, !llvm.access.group !39
15457 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE177:%.*]]
15458 // CHECK13: omp.body.continue177:
15459 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC178:%.*]]
15460 // CHECK13: omp.inner.for.inc178:
15461 // CHECK13-NEXT: [[TMP107:%.*]] = load i32, i32* [[DOTOMP_IV163]], align 4, !llvm.access.group !39
15462 // CHECK13-NEXT: [[ADD179:%.*]] = add nsw i32 [[TMP107]], 1
15463 // CHECK13-NEXT: store i32 [[ADD179]], i32* [[DOTOMP_IV163]], align 4, !llvm.access.group !39
15464 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND165]], !llvm.loop [[LOOP40:![0-9]+]]
15465 // CHECK13: omp.inner.for.end180:
15466 // CHECK13-NEXT: [[TMP108:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_153]], align 4
15467 // CHECK13-NEXT: [[SUB181:%.*]] = sub nsw i32 [[TMP108]], 0
15468 // CHECK13-NEXT: [[DIV182:%.*]] = sdiv i32 [[SUB181]], 1
15469 // CHECK13-NEXT: [[MUL183:%.*]] = mul nsw i32 [[DIV182]], 1
15470 // CHECK13-NEXT: [[ADD184:%.*]] = add nsw i32 0, [[MUL183]]
15471 // CHECK13-NEXT: store i32 [[ADD184]], i32* [[I164]], align 4
15472 // CHECK13-NEXT: br label [[SIMD_IF_END185]]
15473 // CHECK13: simd.if.end185:
15474 // CHECK13-NEXT: [[TMP109:%.*]] = load i32, i32* [[CH]], align 4
15475 // CHECK13-NEXT: store i32 [[TMP109]], i32* [[DOTCAPTURE_EXPR_186]], align 4
15476 // CHECK13-NEXT: [[TMP110:%.*]] = load i32, i32* [[N]], align 4
15477 // CHECK13-NEXT: store i32 [[TMP110]], i32* [[DOTCAPTURE_EXPR_188]], align 4
15478 // CHECK13-NEXT: [[TMP111:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_188]], align 4
15479 // CHECK13-NEXT: [[SUB190:%.*]] = sub nsw i32 [[TMP111]], 0
15480 // CHECK13-NEXT: [[DIV191:%.*]] = sdiv i32 [[SUB190]], 1
15481 // CHECK13-NEXT: [[SUB192:%.*]] = sub nsw i32 [[DIV191]], 1
15482 // CHECK13-NEXT: store i32 [[SUB192]], i32* [[DOTCAPTURE_EXPR_189]], align 4
15483 // CHECK13-NEXT: store i32 0, i32* [[DOTOMP_LB193]], align 4
15484 // CHECK13-NEXT: [[TMP112:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_189]], align 4
15485 // CHECK13-NEXT: store i32 [[TMP112]], i32* [[DOTOMP_UB194]], align 4
15486 // CHECK13-NEXT: store i32 0, i32* [[I195]], align 4
15487 // CHECK13-NEXT: [[TMP113:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_188]], align 4
15488 // CHECK13-NEXT: [[CMP196:%.*]] = icmp slt i32 0, [[TMP113]]
15489 // CHECK13-NEXT: br i1 [[CMP196]], label [[SIMD_IF_THEN197:%.*]], label [[SIMD_IF_END220:%.*]]
15490 // CHECK13: simd.if.then197:
15491 // CHECK13-NEXT: [[TMP114:%.*]] = load i32, i32* [[DOTOMP_LB193]], align 4
15492 // CHECK13-NEXT: store i32 [[TMP114]], i32* [[DOTOMP_IV198]], align 4
15493 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND200:%.*]]
15494 // CHECK13: omp.inner.for.cond200:
15495 // CHECK13-NEXT: [[TMP115:%.*]] = load i32, i32* [[DOTOMP_IV198]], align 4, !llvm.access.group !42
15496 // CHECK13-NEXT: [[TMP116:%.*]] = load i32, i32* [[DOTOMP_UB194]], align 4, !llvm.access.group !42
15497 // CHECK13-NEXT: [[CMP201:%.*]] = icmp sle i32 [[TMP115]], [[TMP116]]
15498 // CHECK13-NEXT: br i1 [[CMP201]], label [[OMP_INNER_FOR_BODY202:%.*]], label [[OMP_INNER_FOR_END215:%.*]]
15499 // CHECK13: omp.inner.for.body202:
15500 // CHECK13-NEXT: [[TMP117:%.*]] = load i32, i32* [[DOTOMP_IV198]], align 4, !llvm.access.group !42
15501 // CHECK13-NEXT: [[MUL203:%.*]] = mul nsw i32 [[TMP117]], 1
15502 // CHECK13-NEXT: [[ADD204:%.*]] = add nsw i32 0, [[MUL203]]
15503 // CHECK13-NEXT: store i32 [[ADD204]], i32* [[I199]], align 4, !llvm.access.group !42
15504 // CHECK13-NEXT: [[TMP118:%.*]] = load i32*, i32** [[B]], align 8, !llvm.access.group !42
15505 // CHECK13-NEXT: [[TMP119:%.*]] = load i32, i32* [[I199]], align 4, !llvm.access.group !42
15506 // CHECK13-NEXT: [[IDXPROM205:%.*]] = sext i32 [[TMP119]] to i64
15507 // CHECK13-NEXT: [[ARRAYIDX206:%.*]] = getelementptr inbounds i32, i32* [[TMP118]], i64 [[IDXPROM205]]
15508 // CHECK13-NEXT: [[TMP120:%.*]] = load i32, i32* [[ARRAYIDX206]], align 4, !llvm.access.group !42
15509 // CHECK13-NEXT: [[TMP121:%.*]] = load i32*, i32** [[C]], align 8, !llvm.access.group !42
15510 // CHECK13-NEXT: [[TMP122:%.*]] = load i32, i32* [[I199]], align 4, !llvm.access.group !42
15511 // CHECK13-NEXT: [[IDXPROM207:%.*]] = sext i32 [[TMP122]] to i64
15512 // CHECK13-NEXT: [[ARRAYIDX208:%.*]] = getelementptr inbounds i32, i32* [[TMP121]], i64 [[IDXPROM207]]
15513 // CHECK13-NEXT: [[TMP123:%.*]] = load i32, i32* [[ARRAYIDX208]], align 4, !llvm.access.group !42
15514 // CHECK13-NEXT: [[ADD209:%.*]] = add nsw i32 [[TMP120]], [[TMP123]]
15515 // CHECK13-NEXT: [[TMP124:%.*]] = load i32*, i32** [[A]], align 8, !llvm.access.group !42
15516 // CHECK13-NEXT: [[TMP125:%.*]] = load i32, i32* [[I199]], align 4, !llvm.access.group !42
15517 // CHECK13-NEXT: [[IDXPROM210:%.*]] = sext i32 [[TMP125]] to i64
15518 // CHECK13-NEXT: [[ARRAYIDX211:%.*]] = getelementptr inbounds i32, i32* [[TMP124]], i64 [[IDXPROM210]]
15519 // CHECK13-NEXT: store i32 [[ADD209]], i32* [[ARRAYIDX211]], align 4, !llvm.access.group !42
15520 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE212:%.*]]
15521 // CHECK13: omp.body.continue212:
15522 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC213:%.*]]
15523 // CHECK13: omp.inner.for.inc213:
15524 // CHECK13-NEXT: [[TMP126:%.*]] = load i32, i32* [[DOTOMP_IV198]], align 4, !llvm.access.group !42
15525 // CHECK13-NEXT: [[ADD214:%.*]] = add nsw i32 [[TMP126]], 1
15526 // CHECK13-NEXT: store i32 [[ADD214]], i32* [[DOTOMP_IV198]], align 4, !llvm.access.group !42
15527 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND200]], !llvm.loop [[LOOP43:![0-9]+]]
15528 // CHECK13: omp.inner.for.end215:
15529 // CHECK13-NEXT: [[TMP127:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_188]], align 4
15530 // CHECK13-NEXT: [[SUB216:%.*]] = sub nsw i32 [[TMP127]], 0
15531 // CHECK13-NEXT: [[DIV217:%.*]] = sdiv i32 [[SUB216]], 1
15532 // CHECK13-NEXT: [[MUL218:%.*]] = mul nsw i32 [[DIV217]], 1
15533 // CHECK13-NEXT: [[ADD219:%.*]] = add nsw i32 0, [[MUL218]]
15534 // CHECK13-NEXT: store i32 [[ADD219]], i32* [[I199]], align 4
15535 // CHECK13-NEXT: br label [[SIMD_IF_END220]]
15536 // CHECK13: simd.if.end220:
15537 // CHECK13-NEXT: ret i32 0
15538 //
15539 //
15540 // CHECK15-LABEL: define {{[^@]+}}@main
15541 // CHECK15-SAME: () #[[ATTR0:[0-9]+]] {
15542 // CHECK15-NEXT: entry:
15543 // CHECK15-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
15544 // CHECK15-NEXT: [[A:%.*]] = alloca double*, align 4
15545 // CHECK15-NEXT: [[B:%.*]] = alloca double*, align 4
15546 // CHECK15-NEXT: [[C:%.*]] = alloca double*, align 4
15547 // CHECK15-NEXT: [[N:%.*]] = alloca i32, align 4
15548 // CHECK15-NEXT: [[CH:%.*]] = alloca i32, align 4
15549 // CHECK15-NEXT: [[TMP:%.*]] = alloca i32, align 4
15550 // CHECK15-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
15551 // CHECK15-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
15552 // CHECK15-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
15553 // CHECK15-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
15554 // CHECK15-NEXT: [[I:%.*]] = alloca i32, align 4
15555 // CHECK15-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
15556 // CHECK15-NEXT: [[I3:%.*]] = alloca i32, align 4
15557 // CHECK15-NEXT: [[_TMP13:%.*]] = alloca i32, align 4
15558 // CHECK15-NEXT: [[DOTCAPTURE_EXPR_14:%.*]] = alloca i32, align 4
15559 // CHECK15-NEXT: [[DOTCAPTURE_EXPR_15:%.*]] = alloca i32, align 4
15560 // CHECK15-NEXT: [[DOTOMP_LB19:%.*]] = alloca i32, align 4
15561 // CHECK15-NEXT: [[DOTOMP_UB20:%.*]] = alloca i32, align 4
15562 // CHECK15-NEXT: [[I21:%.*]] = alloca i32, align 4
15563 // CHECK15-NEXT: [[DOTOMP_IV24:%.*]] = alloca i32, align 4
15564 // CHECK15-NEXT: [[I25:%.*]] = alloca i32, align 4
15565 // CHECK15-NEXT: [[_TMP44:%.*]] = alloca i32, align 4
15566 // CHECK15-NEXT: [[DOTCAPTURE_EXPR_45:%.*]] = alloca i32, align 4
15567 // CHECK15-NEXT: [[DOTCAPTURE_EXPR_46:%.*]] = alloca i32, align 4
15568 // CHECK15-NEXT: [[DOTOMP_LB50:%.*]] = alloca i32, align 4
15569 // CHECK15-NEXT: [[DOTOMP_UB51:%.*]] = alloca i32, align 4
15570 // CHECK15-NEXT: [[I52:%.*]] = alloca i32, align 4
15571 // CHECK15-NEXT: [[DOTOMP_IV55:%.*]] = alloca i32, align 4
15572 // CHECK15-NEXT: [[I56:%.*]] = alloca i32, align 4
15573 // CHECK15-NEXT: [[_TMP75:%.*]] = alloca i32, align 4
15574 // CHECK15-NEXT: [[DOTCAPTURE_EXPR_76:%.*]] = alloca i32, align 4
15575 // CHECK15-NEXT: [[DOTCAPTURE_EXPR_77:%.*]] = alloca i32, align 4
15576 // CHECK15-NEXT: [[DOTOMP_LB81:%.*]] = alloca i32, align 4
15577 // CHECK15-NEXT: [[DOTOMP_UB82:%.*]] = alloca i32, align 4
15578 // CHECK15-NEXT: [[I83:%.*]] = alloca i32, align 4
15579 // CHECK15-NEXT: [[DOTOMP_IV86:%.*]] = alloca i32, align 4
15580 // CHECK15-NEXT: [[I87:%.*]] = alloca i32, align 4
15581 // CHECK15-NEXT: [[DOTCAPTURE_EXPR_106:%.*]] = alloca i32, align 4
15582 // CHECK15-NEXT: [[_TMP107:%.*]] = alloca i32, align 4
15583 // CHECK15-NEXT: [[DOTCAPTURE_EXPR_108:%.*]] = alloca i32, align 4
15584 // CHECK15-NEXT: [[DOTCAPTURE_EXPR_109:%.*]] = alloca i32, align 4
15585 // CHECK15-NEXT: [[DOTOMP_LB113:%.*]] = alloca i32, align 4
15586 // CHECK15-NEXT: [[DOTOMP_UB114:%.*]] = alloca i32, align 4
15587 // CHECK15-NEXT: [[I115:%.*]] = alloca i32, align 4
15588 // CHECK15-NEXT: [[DOTOMP_IV118:%.*]] = alloca i32, align 4
15589 // CHECK15-NEXT: [[I119:%.*]] = alloca i32, align 4
15590 // CHECK15-NEXT: [[_TMP138:%.*]] = alloca i32, align 4
15591 // CHECK15-NEXT: [[DOTCAPTURE_EXPR_139:%.*]] = alloca i32, align 4
15592 // CHECK15-NEXT: [[DOTCAPTURE_EXPR_140:%.*]] = alloca i32, align 4
15593 // CHECK15-NEXT: [[DOTOMP_LB144:%.*]] = alloca i32, align 4
15594 // CHECK15-NEXT: [[DOTOMP_UB145:%.*]] = alloca i32, align 4
15595 // CHECK15-NEXT: [[I146:%.*]] = alloca i32, align 4
15596 // CHECK15-NEXT: [[DOTOMP_IV149:%.*]] = alloca i32, align 4
15597 // CHECK15-NEXT: [[I150:%.*]] = alloca i32, align 4
15598 // CHECK15-NEXT: [[DOTCAPTURE_EXPR_169:%.*]] = alloca i32, align 4
15599 // CHECK15-NEXT: [[_TMP170:%.*]] = alloca i32, align 4
15600 // CHECK15-NEXT: [[DOTCAPTURE_EXPR_171:%.*]] = alloca i32, align 4
15601 // CHECK15-NEXT: [[DOTCAPTURE_EXPR_172:%.*]] = alloca i32, align 4
15602 // CHECK15-NEXT: [[DOTOMP_LB176:%.*]] = alloca i32, align 4
15603 // CHECK15-NEXT: [[DOTOMP_UB177:%.*]] = alloca i32, align 4
15604 // CHECK15-NEXT: [[I178:%.*]] = alloca i32, align 4
15605 // CHECK15-NEXT: [[DOTOMP_IV181:%.*]] = alloca i32, align 4
15606 // CHECK15-NEXT: [[I182:%.*]] = alloca i32, align 4
15607 // CHECK15-NEXT: store i32 0, i32* [[RETVAL]], align 4
15608 // CHECK15-NEXT: store i32 10000, i32* [[N]], align 4
15609 // CHECK15-NEXT: store i32 100, i32* [[CH]], align 4
15610 // CHECK15-NEXT: [[TMP0:%.*]] = load i32, i32* [[N]], align 4
15611 // CHECK15-NEXT: store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
15612 // CHECK15-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15613 // CHECK15-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], 0
15614 // CHECK15-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
15615 // CHECK15-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
15616 // CHECK15-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
15617 // CHECK15-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
15618 // CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15619 // CHECK15-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
15620 // CHECK15-NEXT: store i32 0, i32* [[I]], align 4
15621 // CHECK15-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15622 // CHECK15-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
15623 // CHECK15-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
15624 // CHECK15: simd.if.then:
15625 // CHECK15-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
15626 // CHECK15-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
15627 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
15628 // CHECK15: omp.inner.for.cond:
15629 // CHECK15-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
15630 // CHECK15-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !3
15631 // CHECK15-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
15632 // CHECK15-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15633 // CHECK15: omp.inner.for.body:
15634 // CHECK15-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
15635 // CHECK15-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
15636 // CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
15637 // CHECK15-NEXT: store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !3
15638 // CHECK15-NEXT: [[TMP8:%.*]] = load double*, double** [[B]], align 4, !llvm.access.group !3
15639 // CHECK15-NEXT: [[TMP9:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !3
15640 // CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP8]], i32 [[TMP9]]
15641 // CHECK15-NEXT: [[TMP10:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !3
15642 // CHECK15-NEXT: [[TMP11:%.*]] = load double*, double** [[C]], align 4, !llvm.access.group !3
15643 // CHECK15-NEXT: [[TMP12:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !3
15644 // CHECK15-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds double, double* [[TMP11]], i32 [[TMP12]]
15645 // CHECK15-NEXT: [[TMP13:%.*]] = load double, double* [[ARRAYIDX5]], align 4, !llvm.access.group !3
15646 // CHECK15-NEXT: [[ADD6:%.*]] = fadd double [[TMP10]], [[TMP13]]
15647 // CHECK15-NEXT: [[TMP14:%.*]] = load double*, double** [[A]], align 4, !llvm.access.group !3
15648 // CHECK15-NEXT: [[TMP15:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !3
15649 // CHECK15-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds double, double* [[TMP14]], i32 [[TMP15]]
15650 // CHECK15-NEXT: store double [[ADD6]], double* [[ARRAYIDX7]], align 4, !llvm.access.group !3
15651 // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
15652 // CHECK15: omp.body.continue:
15653 // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
15654 // CHECK15: omp.inner.for.inc:
15655 // CHECK15-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
15656 // CHECK15-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP16]], 1
15657 // CHECK15-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
15658 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
15659 // CHECK15: omp.inner.for.end:
15660 // CHECK15-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15661 // CHECK15-NEXT: [[SUB9:%.*]] = sub nsw i32 [[TMP17]], 0
15662 // CHECK15-NEXT: [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
15663 // CHECK15-NEXT: [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
15664 // CHECK15-NEXT: [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
15665 // CHECK15-NEXT: store i32 [[ADD12]], i32* [[I3]], align 4
15666 // CHECK15-NEXT: br label [[SIMD_IF_END]]
15667 // CHECK15: simd.if.end:
15668 // CHECK15-NEXT: [[TMP18:%.*]] = load i32, i32* [[N]], align 4
15669 // CHECK15-NEXT: store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR_14]], align 4
15670 // CHECK15-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_14]], align 4
15671 // CHECK15-NEXT: [[SUB16:%.*]] = sub nsw i32 [[TMP19]], 0
15672 // CHECK15-NEXT: [[DIV17:%.*]] = sdiv i32 [[SUB16]], 1
15673 // CHECK15-NEXT: [[SUB18:%.*]] = sub nsw i32 [[DIV17]], 1
15674 // CHECK15-NEXT: store i32 [[SUB18]], i32* [[DOTCAPTURE_EXPR_15]], align 4
15675 // CHECK15-NEXT: store i32 0, i32* [[DOTOMP_LB19]], align 4
15676 // CHECK15-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_15]], align 4
15677 // CHECK15-NEXT: store i32 [[TMP20]], i32* [[DOTOMP_UB20]], align 4
15678 // CHECK15-NEXT: store i32 0, i32* [[I21]], align 4
15679 // CHECK15-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_14]], align 4
15680 // CHECK15-NEXT: [[CMP22:%.*]] = icmp slt i32 0, [[TMP21]]
15681 // CHECK15-NEXT: br i1 [[CMP22]], label [[SIMD_IF_THEN23:%.*]], label [[SIMD_IF_END43:%.*]]
15682 // CHECK15: simd.if.then23:
15683 // CHECK15-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_LB19]], align 4
15684 // CHECK15-NEXT: store i32 [[TMP22]], i32* [[DOTOMP_IV24]], align 4
15685 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND26:%.*]]
15686 // CHECK15: omp.inner.for.cond26:
15687 // CHECK15-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV24]], align 4, !llvm.access.group !7
15688 // CHECK15-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_UB20]], align 4, !llvm.access.group !7
15689 // CHECK15-NEXT: [[CMP27:%.*]] = icmp sle i32 [[TMP23]], [[TMP24]]
15690 // CHECK15-NEXT: br i1 [[CMP27]], label [[OMP_INNER_FOR_BODY28:%.*]], label [[OMP_INNER_FOR_END38:%.*]]
15691 // CHECK15: omp.inner.for.body28:
15692 // CHECK15-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV24]], align 4, !llvm.access.group !7
15693 // CHECK15-NEXT: [[MUL29:%.*]] = mul nsw i32 [[TMP25]], 1
15694 // CHECK15-NEXT: [[ADD30:%.*]] = add nsw i32 0, [[MUL29]]
15695 // CHECK15-NEXT: store i32 [[ADD30]], i32* [[I25]], align 4, !llvm.access.group !7
15696 // CHECK15-NEXT: [[TMP26:%.*]] = load double*, double** [[B]], align 4, !llvm.access.group !7
15697 // CHECK15-NEXT: [[TMP27:%.*]] = load i32, i32* [[I25]], align 4, !llvm.access.group !7
15698 // CHECK15-NEXT: [[ARRAYIDX31:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
15699 // CHECK15-NEXT: [[TMP28:%.*]] = load double, double* [[ARRAYIDX31]], align 4, !llvm.access.group !7
15700 // CHECK15-NEXT: [[TMP29:%.*]] = load double*, double** [[C]], align 4, !llvm.access.group !7
15701 // CHECK15-NEXT: [[TMP30:%.*]] = load i32, i32* [[I25]], align 4, !llvm.access.group !7
15702 // CHECK15-NEXT: [[ARRAYIDX32:%.*]] = getelementptr inbounds double, double* [[TMP29]], i32 [[TMP30]]
15703 // CHECK15-NEXT: [[TMP31:%.*]] = load double, double* [[ARRAYIDX32]], align 4, !llvm.access.group !7
15704 // CHECK15-NEXT: [[ADD33:%.*]] = fadd double [[TMP28]], [[TMP31]]
15705 // CHECK15-NEXT: [[TMP32:%.*]] = load double*, double** [[A]], align 4, !llvm.access.group !7
15706 // CHECK15-NEXT: [[TMP33:%.*]] = load i32, i32* [[I25]], align 4, !llvm.access.group !7
15707 // CHECK15-NEXT: [[ARRAYIDX34:%.*]] = getelementptr inbounds double, double* [[TMP32]], i32 [[TMP33]]
15708 // CHECK15-NEXT: store double [[ADD33]], double* [[ARRAYIDX34]], align 4, !llvm.access.group !7
15709 // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE35:%.*]]
15710 // CHECK15: omp.body.continue35:
15711 // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC36:%.*]]
15712 // CHECK15: omp.inner.for.inc36:
15713 // CHECK15-NEXT: [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV24]], align 4, !llvm.access.group !7
15714 // CHECK15-NEXT: [[ADD37:%.*]] = add nsw i32 [[TMP34]], 1
15715 // CHECK15-NEXT: store i32 [[ADD37]], i32* [[DOTOMP_IV24]], align 4, !llvm.access.group !7
15716 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND26]], !llvm.loop [[LOOP8:![0-9]+]]
15717 // CHECK15: omp.inner.for.end38:
15718 // CHECK15-NEXT: [[TMP35:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_14]], align 4
15719 // CHECK15-NEXT: [[SUB39:%.*]] = sub nsw i32 [[TMP35]], 0
15720 // CHECK15-NEXT: [[DIV40:%.*]] = sdiv i32 [[SUB39]], 1
15721 // CHECK15-NEXT: [[MUL41:%.*]] = mul nsw i32 [[DIV40]], 1
15722 // CHECK15-NEXT: [[ADD42:%.*]] = add nsw i32 0, [[MUL41]]
15723 // CHECK15-NEXT: store i32 [[ADD42]], i32* [[I25]], align 4
15724 // CHECK15-NEXT: br label [[SIMD_IF_END43]]
15725 // CHECK15: simd.if.end43:
15726 // CHECK15-NEXT: [[TMP36:%.*]] = load i32, i32* [[N]], align 4
15727 // CHECK15-NEXT: store i32 [[TMP36]], i32* [[DOTCAPTURE_EXPR_45]], align 4
15728 // CHECK15-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_45]], align 4
15729 // CHECK15-NEXT: [[SUB47:%.*]] = sub nsw i32 [[TMP37]], 0
15730 // CHECK15-NEXT: [[DIV48:%.*]] = sdiv i32 [[SUB47]], 1
15731 // CHECK15-NEXT: [[SUB49:%.*]] = sub nsw i32 [[DIV48]], 1
15732 // CHECK15-NEXT: store i32 [[SUB49]], i32* [[DOTCAPTURE_EXPR_46]], align 4
15733 // CHECK15-NEXT: store i32 0, i32* [[DOTOMP_LB50]], align 4
15734 // CHECK15-NEXT: [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_46]], align 4
15735 // CHECK15-NEXT: store i32 [[TMP38]], i32* [[DOTOMP_UB51]], align 4
15736 // CHECK15-NEXT: store i32 0, i32* [[I52]], align 4
15737 // CHECK15-NEXT: [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_45]], align 4
15738 // CHECK15-NEXT: [[CMP53:%.*]] = icmp slt i32 0, [[TMP39]]
15739 // CHECK15-NEXT: br i1 [[CMP53]], label [[SIMD_IF_THEN54:%.*]], label [[SIMD_IF_END74:%.*]]
15740 // CHECK15: simd.if.then54:
15741 // CHECK15-NEXT: [[TMP40:%.*]] = load i32, i32* [[DOTOMP_LB50]], align 4
15742 // CHECK15-NEXT: store i32 [[TMP40]], i32* [[DOTOMP_IV55]], align 4
15743 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND57:%.*]]
15744 // CHECK15: omp.inner.for.cond57:
15745 // CHECK15-NEXT: [[TMP41:%.*]] = load i32, i32* [[DOTOMP_IV55]], align 4, !llvm.access.group !10
15746 // CHECK15-NEXT: [[TMP42:%.*]] = load i32, i32* [[DOTOMP_UB51]], align 4, !llvm.access.group !10
15747 // CHECK15-NEXT: [[CMP58:%.*]] = icmp sle i32 [[TMP41]], [[TMP42]]
15748 // CHECK15-NEXT: br i1 [[CMP58]], label [[OMP_INNER_FOR_BODY59:%.*]], label [[OMP_INNER_FOR_END69:%.*]]
15749 // CHECK15: omp.inner.for.body59:
15750 // CHECK15-NEXT: [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV55]], align 4, !llvm.access.group !10
15751 // CHECK15-NEXT: [[MUL60:%.*]] = mul nsw i32 [[TMP43]], 1
15752 // CHECK15-NEXT: [[ADD61:%.*]] = add nsw i32 0, [[MUL60]]
15753 // CHECK15-NEXT: store i32 [[ADD61]], i32* [[I56]], align 4, !llvm.access.group !10
15754 // CHECK15-NEXT: [[TMP44:%.*]] = load double*, double** [[B]], align 4, !llvm.access.group !10
15755 // CHECK15-NEXT: [[TMP45:%.*]] = load i32, i32* [[I56]], align 4, !llvm.access.group !10
15756 // CHECK15-NEXT: [[ARRAYIDX62:%.*]] = getelementptr inbounds double, double* [[TMP44]], i32 [[TMP45]]
15757 // CHECK15-NEXT: [[TMP46:%.*]] = load double, double* [[ARRAYIDX62]], align 4, !llvm.access.group !10
15758 // CHECK15-NEXT: [[TMP47:%.*]] = load double*, double** [[C]], align 4, !llvm.access.group !10
15759 // CHECK15-NEXT: [[TMP48:%.*]] = load i32, i32* [[I56]], align 4, !llvm.access.group !10
15760 // CHECK15-NEXT: [[ARRAYIDX63:%.*]] = getelementptr inbounds double, double* [[TMP47]], i32 [[TMP48]]
15761 // CHECK15-NEXT: [[TMP49:%.*]] = load double, double* [[ARRAYIDX63]], align 4, !llvm.access.group !10
15762 // CHECK15-NEXT: [[ADD64:%.*]] = fadd double [[TMP46]], [[TMP49]]
15763 // CHECK15-NEXT: [[TMP50:%.*]] = load double*, double** [[A]], align 4, !llvm.access.group !10
15764 // CHECK15-NEXT: [[TMP51:%.*]] = load i32, i32* [[I56]], align 4, !llvm.access.group !10
15765 // CHECK15-NEXT: [[ARRAYIDX65:%.*]] = getelementptr inbounds double, double* [[TMP50]], i32 [[TMP51]]
15766 // CHECK15-NEXT: store double [[ADD64]], double* [[ARRAYIDX65]], align 4, !llvm.access.group !10
15767 // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE66:%.*]]
15768 // CHECK15: omp.body.continue66:
15769 // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC67:%.*]]
15770 // CHECK15: omp.inner.for.inc67:
15771 // CHECK15-NEXT: [[TMP52:%.*]] = load i32, i32* [[DOTOMP_IV55]], align 4, !llvm.access.group !10
15772 // CHECK15-NEXT: [[ADD68:%.*]] = add nsw i32 [[TMP52]], 1
15773 // CHECK15-NEXT: store i32 [[ADD68]], i32* [[DOTOMP_IV55]], align 4, !llvm.access.group !10
15774 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND57]], !llvm.loop [[LOOP11:![0-9]+]]
15775 // CHECK15: omp.inner.for.end69:
15776 // CHECK15-NEXT: [[TMP53:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_45]], align 4
15777 // CHECK15-NEXT: [[SUB70:%.*]] = sub nsw i32 [[TMP53]], 0
15778 // CHECK15-NEXT: [[DIV71:%.*]] = sdiv i32 [[SUB70]], 1
15779 // CHECK15-NEXT: [[MUL72:%.*]] = mul nsw i32 [[DIV71]], 1
15780 // CHECK15-NEXT: [[ADD73:%.*]] = add nsw i32 0, [[MUL72]]
15781 // CHECK15-NEXT: store i32 [[ADD73]], i32* [[I56]], align 4
15782 // CHECK15-NEXT: br label [[SIMD_IF_END74]]
15783 // CHECK15: simd.if.end74:
15784 // CHECK15-NEXT: [[TMP54:%.*]] = load i32, i32* [[N]], align 4
15785 // CHECK15-NEXT: store i32 [[TMP54]], i32* [[DOTCAPTURE_EXPR_76]], align 4
15786 // CHECK15-NEXT: [[TMP55:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_76]], align 4
15787 // CHECK15-NEXT: [[SUB78:%.*]] = sub nsw i32 [[TMP55]], 0
15788 // CHECK15-NEXT: [[DIV79:%.*]] = sdiv i32 [[SUB78]], 1
15789 // CHECK15-NEXT: [[SUB80:%.*]] = sub nsw i32 [[DIV79]], 1
15790 // CHECK15-NEXT: store i32 [[SUB80]], i32* [[DOTCAPTURE_EXPR_77]], align 4
15791 // CHECK15-NEXT: store i32 0, i32* [[DOTOMP_LB81]], align 4
15792 // CHECK15-NEXT: [[TMP56:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_77]], align 4
15793 // CHECK15-NEXT: store i32 [[TMP56]], i32* [[DOTOMP_UB82]], align 4
15794 // CHECK15-NEXT: store i32 0, i32* [[I83]], align 4
15795 // CHECK15-NEXT: [[TMP57:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_76]], align 4
15796 // CHECK15-NEXT: [[CMP84:%.*]] = icmp slt i32 0, [[TMP57]]
15797 // CHECK15-NEXT: br i1 [[CMP84]], label [[SIMD_IF_THEN85:%.*]], label [[SIMD_IF_END105:%.*]]
15798 // CHECK15: simd.if.then85:
15799 // CHECK15-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTOMP_LB81]], align 4
15800 // CHECK15-NEXT: store i32 [[TMP58]], i32* [[DOTOMP_IV86]], align 4
15801 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND88:%.*]]
15802 // CHECK15: omp.inner.for.cond88:
15803 // CHECK15-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTOMP_IV86]], align 4, !llvm.access.group !13
15804 // CHECK15-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTOMP_UB82]], align 4, !llvm.access.group !13
15805 // CHECK15-NEXT: [[CMP89:%.*]] = icmp sle i32 [[TMP59]], [[TMP60]]
15806 // CHECK15-NEXT: br i1 [[CMP89]], label [[OMP_INNER_FOR_BODY90:%.*]], label [[OMP_INNER_FOR_END100:%.*]]
15807 // CHECK15: omp.inner.for.body90:
15808 // CHECK15-NEXT: [[TMP61:%.*]] = load i32, i32* [[DOTOMP_IV86]], align 4, !llvm.access.group !13
15809 // CHECK15-NEXT: [[MUL91:%.*]] = mul nsw i32 [[TMP61]], 1
15810 // CHECK15-NEXT: [[ADD92:%.*]] = add nsw i32 0, [[MUL91]]
15811 // CHECK15-NEXT: store i32 [[ADD92]], i32* [[I87]], align 4, !llvm.access.group !13
15812 // CHECK15-NEXT: [[TMP62:%.*]] = load double*, double** [[B]], align 4, !llvm.access.group !13
15813 // CHECK15-NEXT: [[TMP63:%.*]] = load i32, i32* [[I87]], align 4, !llvm.access.group !13
15814 // CHECK15-NEXT: [[ARRAYIDX93:%.*]] = getelementptr inbounds double, double* [[TMP62]], i32 [[TMP63]]
15815 // CHECK15-NEXT: [[TMP64:%.*]] = load double, double* [[ARRAYIDX93]], align 4, !llvm.access.group !13
15816 // CHECK15-NEXT: [[TMP65:%.*]] = load double*, double** [[C]], align 4, !llvm.access.group !13
15817 // CHECK15-NEXT: [[TMP66:%.*]] = load i32, i32* [[I87]], align 4, !llvm.access.group !13
15818 // CHECK15-NEXT: [[ARRAYIDX94:%.*]] = getelementptr inbounds double, double* [[TMP65]], i32 [[TMP66]]
15819 // CHECK15-NEXT: [[TMP67:%.*]] = load double, double* [[ARRAYIDX94]], align 4, !llvm.access.group !13
15820 // CHECK15-NEXT: [[ADD95:%.*]] = fadd double [[TMP64]], [[TMP67]]
15821 // CHECK15-NEXT: [[TMP68:%.*]] = load double*, double** [[A]], align 4, !llvm.access.group !13
15822 // CHECK15-NEXT: [[TMP69:%.*]] = load i32, i32* [[I87]], align 4, !llvm.access.group !13
15823 // CHECK15-NEXT: [[ARRAYIDX96:%.*]] = getelementptr inbounds double, double* [[TMP68]], i32 [[TMP69]]
15824 // CHECK15-NEXT: store double [[ADD95]], double* [[ARRAYIDX96]], align 4, !llvm.access.group !13
15825 // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE97:%.*]]
15826 // CHECK15: omp.body.continue97:
15827 // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC98:%.*]]
15828 // CHECK15: omp.inner.for.inc98:
15829 // CHECK15-NEXT: [[TMP70:%.*]] = load i32, i32* [[DOTOMP_IV86]], align 4, !llvm.access.group !13
15830 // CHECK15-NEXT: [[ADD99:%.*]] = add nsw i32 [[TMP70]], 1
15831 // CHECK15-NEXT: store i32 [[ADD99]], i32* [[DOTOMP_IV86]], align 4, !llvm.access.group !13
15832 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND88]], !llvm.loop [[LOOP14:![0-9]+]]
15833 // CHECK15: omp.inner.for.end100:
15834 // CHECK15-NEXT: [[TMP71:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_76]], align 4
15835 // CHECK15-NEXT: [[SUB101:%.*]] = sub nsw i32 [[TMP71]], 0
15836 // CHECK15-NEXT: [[DIV102:%.*]] = sdiv i32 [[SUB101]], 1
15837 // CHECK15-NEXT: [[MUL103:%.*]] = mul nsw i32 [[DIV102]], 1
15838 // CHECK15-NEXT: [[ADD104:%.*]] = add nsw i32 0, [[MUL103]]
15839 // CHECK15-NEXT: store i32 [[ADD104]], i32* [[I87]], align 4
15840 // CHECK15-NEXT: br label [[SIMD_IF_END105]]
15841 // CHECK15: simd.if.end105:
15842 // CHECK15-NEXT: [[TMP72:%.*]] = load i32, i32* [[CH]], align 4
15843 // CHECK15-NEXT: store i32 [[TMP72]], i32* [[DOTCAPTURE_EXPR_106]], align 4
15844 // CHECK15-NEXT: [[TMP73:%.*]] = load i32, i32* [[N]], align 4
15845 // CHECK15-NEXT: store i32 [[TMP73]], i32* [[DOTCAPTURE_EXPR_108]], align 4
15846 // CHECK15-NEXT: [[TMP74:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_108]], align 4
15847 // CHECK15-NEXT: [[SUB110:%.*]] = sub nsw i32 [[TMP74]], 0
15848 // CHECK15-NEXT: [[DIV111:%.*]] = sdiv i32 [[SUB110]], 1
15849 // CHECK15-NEXT: [[SUB112:%.*]] = sub nsw i32 [[DIV111]], 1
15850 // CHECK15-NEXT: store i32 [[SUB112]], i32* [[DOTCAPTURE_EXPR_109]], align 4
15851 // CHECK15-NEXT: store i32 0, i32* [[DOTOMP_LB113]], align 4
15852 // CHECK15-NEXT: [[TMP75:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_109]], align 4
15853 // CHECK15-NEXT: store i32 [[TMP75]], i32* [[DOTOMP_UB114]], align 4
15854 // CHECK15-NEXT: store i32 0, i32* [[I115]], align 4
15855 // CHECK15-NEXT: [[TMP76:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_108]], align 4
15856 // CHECK15-NEXT: [[CMP116:%.*]] = icmp slt i32 0, [[TMP76]]
15857 // CHECK15-NEXT: br i1 [[CMP116]], label [[SIMD_IF_THEN117:%.*]], label [[SIMD_IF_END137:%.*]]
15858 // CHECK15: simd.if.then117:
15859 // CHECK15-NEXT: [[TMP77:%.*]] = load i32, i32* [[DOTOMP_LB113]], align 4
15860 // CHECK15-NEXT: store i32 [[TMP77]], i32* [[DOTOMP_IV118]], align 4
15861 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND120:%.*]]
15862 // CHECK15: omp.inner.for.cond120:
15863 // CHECK15-NEXT: [[TMP78:%.*]] = load i32, i32* [[DOTOMP_IV118]], align 4, !llvm.access.group !16
15864 // CHECK15-NEXT: [[TMP79:%.*]] = load i32, i32* [[DOTOMP_UB114]], align 4, !llvm.access.group !16
15865 // CHECK15-NEXT: [[CMP121:%.*]] = icmp sle i32 [[TMP78]], [[TMP79]]
15866 // CHECK15-NEXT: br i1 [[CMP121]], label [[OMP_INNER_FOR_BODY122:%.*]], label [[OMP_INNER_FOR_END132:%.*]]
15867 // CHECK15: omp.inner.for.body122:
15868 // CHECK15-NEXT: [[TMP80:%.*]] = load i32, i32* [[DOTOMP_IV118]], align 4, !llvm.access.group !16
15869 // CHECK15-NEXT: [[MUL123:%.*]] = mul nsw i32 [[TMP80]], 1
15870 // CHECK15-NEXT: [[ADD124:%.*]] = add nsw i32 0, [[MUL123]]
15871 // CHECK15-NEXT: store i32 [[ADD124]], i32* [[I119]], align 4, !llvm.access.group !16
15872 // CHECK15-NEXT: [[TMP81:%.*]] = load double*, double** [[B]], align 4, !llvm.access.group !16
// CHECK15-NEXT: [[TMP82:%.*]] = load i32, i32* [[I119]], align 4, !llvm.access.group !16
// CHECK15-NEXT: [[ARRAYIDX125:%.*]] = getelementptr inbounds double, double* [[TMP81]], i32 [[TMP82]]
// CHECK15-NEXT: [[TMP83:%.*]] = load double, double* [[ARRAYIDX125]], align 4, !llvm.access.group !16
// CHECK15-NEXT: [[TMP84:%.*]] = load double*, double** [[C]], align 4, !llvm.access.group !16
// CHECK15-NEXT: [[TMP85:%.*]] = load i32, i32* [[I119]], align 4, !llvm.access.group !16
// CHECK15-NEXT: [[ARRAYIDX126:%.*]] = getelementptr inbounds double, double* [[TMP84]], i32 [[TMP85]]
// CHECK15-NEXT: [[TMP86:%.*]] = load double, double* [[ARRAYIDX126]], align 4, !llvm.access.group !16
// CHECK15-NEXT: [[ADD127:%.*]] = fadd double [[TMP83]], [[TMP86]]
// CHECK15-NEXT: [[TMP87:%.*]] = load double*, double** [[A]], align 4, !llvm.access.group !16
// CHECK15-NEXT: [[TMP88:%.*]] = load i32, i32* [[I119]], align 4, !llvm.access.group !16
// CHECK15-NEXT: [[ARRAYIDX128:%.*]] = getelementptr inbounds double, double* [[TMP87]], i32 [[TMP88]]
// CHECK15-NEXT: store double [[ADD127]], double* [[ARRAYIDX128]], align 4, !llvm.access.group !16
// CHECK15-NEXT: br label [[OMP_BODY_CONTINUE129:%.*]]
// CHECK15: omp.body.continue129:
// CHECK15-NEXT: br label [[OMP_INNER_FOR_INC130:%.*]]
// CHECK15: omp.inner.for.inc130:
// CHECK15-NEXT: [[TMP89:%.*]] = load i32, i32* [[DOTOMP_IV118]], align 4, !llvm.access.group !16
// CHECK15-NEXT: [[ADD131:%.*]] = add nsw i32 [[TMP89]], 1
// CHECK15-NEXT: store i32 [[ADD131]], i32* [[DOTOMP_IV118]], align 4, !llvm.access.group !16
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND120]], !llvm.loop [[LOOP17:![0-9]+]]
// CHECK15: omp.inner.for.end132:
// CHECK15-NEXT: [[TMP90:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_108]], align 4
// CHECK15-NEXT: [[SUB133:%.*]] = sub nsw i32 [[TMP90]], 0
// CHECK15-NEXT: [[DIV134:%.*]] = sdiv i32 [[SUB133]], 1
// CHECK15-NEXT: [[MUL135:%.*]] = mul nsw i32 [[DIV134]], 1
// CHECK15-NEXT: [[ADD136:%.*]] = add nsw i32 0, [[MUL135]]
// CHECK15-NEXT: store i32 [[ADD136]], i32* [[I119]], align 4
// CHECK15-NEXT: br label [[SIMD_IF_END137]]
// CHECK15: simd.if.end137:
// CHECK15-NEXT: [[TMP91:%.*]] = load i32, i32* [[N]], align 4
// CHECK15-NEXT: store i32 [[TMP91]], i32* [[DOTCAPTURE_EXPR_139]], align 4
// CHECK15-NEXT: [[TMP92:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_139]], align 4
// CHECK15-NEXT: [[SUB141:%.*]] = sub nsw i32 [[TMP92]], 0
// CHECK15-NEXT: [[DIV142:%.*]] = sdiv i32 [[SUB141]], 1
// CHECK15-NEXT: [[SUB143:%.*]] = sub nsw i32 [[DIV142]], 1
// CHECK15-NEXT: store i32 [[SUB143]], i32* [[DOTCAPTURE_EXPR_140]], align 4
// CHECK15-NEXT: store i32 0, i32* [[DOTOMP_LB144]], align 4
// CHECK15-NEXT: [[TMP93:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_140]], align 4
// CHECK15-NEXT: store i32 [[TMP93]], i32* [[DOTOMP_UB145]], align 4
// CHECK15-NEXT: store i32 0, i32* [[I146]], align 4
// CHECK15-NEXT: [[TMP94:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_139]], align 4
// CHECK15-NEXT: [[CMP147:%.*]] = icmp slt i32 0, [[TMP94]]
// CHECK15-NEXT: br i1 [[CMP147]], label [[SIMD_IF_THEN148:%.*]], label [[SIMD_IF_END168:%.*]]
// CHECK15: simd.if.then148:
// CHECK15-NEXT: [[TMP95:%.*]] = load i32, i32* [[DOTOMP_LB144]], align 4
// CHECK15-NEXT: store i32 [[TMP95]], i32* [[DOTOMP_IV149]], align 4
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND151:%.*]]
// CHECK15: omp.inner.for.cond151:
// CHECK15-NEXT: [[TMP96:%.*]] = load i32, i32* [[DOTOMP_IV149]], align 4, !llvm.access.group !19
// CHECK15-NEXT: [[TMP97:%.*]] = load i32, i32* [[DOTOMP_UB145]], align 4, !llvm.access.group !19
// CHECK15-NEXT: [[CMP152:%.*]] = icmp sle i32 [[TMP96]], [[TMP97]]
// CHECK15-NEXT: br i1 [[CMP152]], label [[OMP_INNER_FOR_BODY153:%.*]], label [[OMP_INNER_FOR_END163:%.*]]
// CHECK15: omp.inner.for.body153:
// CHECK15-NEXT: [[TMP98:%.*]] = load i32, i32* [[DOTOMP_IV149]], align 4, !llvm.access.group !19
// CHECK15-NEXT: [[MUL154:%.*]] = mul nsw i32 [[TMP98]], 1
// CHECK15-NEXT: [[ADD155:%.*]] = add nsw i32 0, [[MUL154]]
// CHECK15-NEXT: store i32 [[ADD155]], i32* [[I150]], align 4, !llvm.access.group !19
// CHECK15-NEXT: [[TMP99:%.*]] = load double*, double** [[B]], align 4, !llvm.access.group !19
// CHECK15-NEXT: [[TMP100:%.*]] = load i32, i32* [[I150]], align 4, !llvm.access.group !19
// CHECK15-NEXT: [[ARRAYIDX156:%.*]] = getelementptr inbounds double, double* [[TMP99]], i32 [[TMP100]]
// CHECK15-NEXT: [[TMP101:%.*]] = load double, double* [[ARRAYIDX156]], align 4, !llvm.access.group !19
// CHECK15-NEXT: [[TMP102:%.*]] = load double*, double** [[C]], align 4, !llvm.access.group !19
// CHECK15-NEXT: [[TMP103:%.*]] = load i32, i32* [[I150]], align 4, !llvm.access.group !19
// CHECK15-NEXT: [[ARRAYIDX157:%.*]] = getelementptr inbounds double, double* [[TMP102]], i32 [[TMP103]]
// CHECK15-NEXT: [[TMP104:%.*]] = load double, double* [[ARRAYIDX157]], align 4, !llvm.access.group !19
// CHECK15-NEXT: [[ADD158:%.*]] = fadd double [[TMP101]], [[TMP104]]
// CHECK15-NEXT: [[TMP105:%.*]] = load double*, double** [[A]], align 4, !llvm.access.group !19
// CHECK15-NEXT: [[TMP106:%.*]] = load i32, i32* [[I150]], align 4, !llvm.access.group !19
// CHECK15-NEXT: [[ARRAYIDX159:%.*]] = getelementptr inbounds double, double* [[TMP105]], i32 [[TMP106]]
// CHECK15-NEXT: store double [[ADD158]], double* [[ARRAYIDX159]], align 4, !llvm.access.group !19
// CHECK15-NEXT: br label [[OMP_BODY_CONTINUE160:%.*]]
// CHECK15: omp.body.continue160:
// CHECK15-NEXT: br label [[OMP_INNER_FOR_INC161:%.*]]
// CHECK15: omp.inner.for.inc161:
// CHECK15-NEXT: [[TMP107:%.*]] = load i32, i32* [[DOTOMP_IV149]], align 4, !llvm.access.group !19
// CHECK15-NEXT: [[ADD162:%.*]] = add nsw i32 [[TMP107]], 1
// CHECK15-NEXT: store i32 [[ADD162]], i32* [[DOTOMP_IV149]], align 4, !llvm.access.group !19
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND151]], !llvm.loop [[LOOP20:![0-9]+]]
// CHECK15: omp.inner.for.end163:
// CHECK15-NEXT: [[TMP108:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_139]], align 4
// CHECK15-NEXT: [[SUB164:%.*]] = sub nsw i32 [[TMP108]], 0
// CHECK15-NEXT: [[DIV165:%.*]] = sdiv i32 [[SUB164]], 1
// CHECK15-NEXT: [[MUL166:%.*]] = mul nsw i32 [[DIV165]], 1
// CHECK15-NEXT: [[ADD167:%.*]] = add nsw i32 0, [[MUL166]]
// CHECK15-NEXT: store i32 [[ADD167]], i32* [[I150]], align 4
// CHECK15-NEXT: br label [[SIMD_IF_END168]]
// CHECK15: simd.if.end168:
// CHECK15-NEXT: [[TMP109:%.*]] = load i32, i32* [[CH]], align 4
// CHECK15-NEXT: store i32 [[TMP109]], i32* [[DOTCAPTURE_EXPR_169]], align 4
// CHECK15-NEXT: [[TMP110:%.*]] = load i32, i32* [[N]], align 4
// CHECK15-NEXT: store i32 [[TMP110]], i32* [[DOTCAPTURE_EXPR_171]], align 4
// CHECK15-NEXT: [[TMP111:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_171]], align 4
// CHECK15-NEXT: [[SUB173:%.*]] = sub nsw i32 [[TMP111]], 0
// CHECK15-NEXT: [[DIV174:%.*]] = sdiv i32 [[SUB173]], 1
// CHECK15-NEXT: [[SUB175:%.*]] = sub nsw i32 [[DIV174]], 1
// CHECK15-NEXT: store i32 [[SUB175]], i32* [[DOTCAPTURE_EXPR_172]], align 4
// CHECK15-NEXT: store i32 0, i32* [[DOTOMP_LB176]], align 4
// CHECK15-NEXT: [[TMP112:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_172]], align 4
// CHECK15-NEXT: store i32 [[TMP112]], i32* [[DOTOMP_UB177]], align 4
// CHECK15-NEXT: store i32 0, i32* [[I178]], align 4
// CHECK15-NEXT: [[TMP113:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_171]], align 4
// CHECK15-NEXT: [[CMP179:%.*]] = icmp slt i32 0, [[TMP113]]
// CHECK15-NEXT: br i1 [[CMP179]], label [[SIMD_IF_THEN180:%.*]], label [[SIMD_IF_END200:%.*]]
// CHECK15: simd.if.then180:
// CHECK15-NEXT: [[TMP114:%.*]] = load i32, i32* [[DOTOMP_LB176]], align 4
// CHECK15-NEXT: store i32 [[TMP114]], i32* [[DOTOMP_IV181]], align 4
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND183:%.*]]
// CHECK15: omp.inner.for.cond183:
// CHECK15-NEXT: [[TMP115:%.*]] = load i32, i32* [[DOTOMP_IV181]], align 4, !llvm.access.group !22
// CHECK15-NEXT: [[TMP116:%.*]] = load i32, i32* [[DOTOMP_UB177]], align 4, !llvm.access.group !22
// CHECK15-NEXT: [[CMP184:%.*]] = icmp sle i32 [[TMP115]], [[TMP116]]
// CHECK15-NEXT: br i1 [[CMP184]], label [[OMP_INNER_FOR_BODY185:%.*]], label [[OMP_INNER_FOR_END195:%.*]]
// CHECK15: omp.inner.for.body185:
// CHECK15-NEXT: [[TMP117:%.*]] = load i32, i32* [[DOTOMP_IV181]], align 4, !llvm.access.group !22
// CHECK15-NEXT: [[MUL186:%.*]] = mul nsw i32 [[TMP117]], 1
// CHECK15-NEXT: [[ADD187:%.*]] = add nsw i32 0, [[MUL186]]
// CHECK15-NEXT: store i32 [[ADD187]], i32* [[I182]], align 4, !llvm.access.group !22
// CHECK15-NEXT: [[TMP118:%.*]] = load double*, double** [[B]], align 4, !llvm.access.group !22
// CHECK15-NEXT: [[TMP119:%.*]] = load i32, i32* [[I182]], align 4, !llvm.access.group !22
// CHECK15-NEXT: [[ARRAYIDX188:%.*]] = getelementptr inbounds double, double* [[TMP118]], i32 [[TMP119]]
// CHECK15-NEXT: [[TMP120:%.*]] = load double, double* [[ARRAYIDX188]], align 4, !llvm.access.group !22
// CHECK15-NEXT: [[TMP121:%.*]] = load double*, double** [[C]], align 4, !llvm.access.group !22
// CHECK15-NEXT: [[TMP122:%.*]] = load i32, i32* [[I182]], align 4, !llvm.access.group !22
// CHECK15-NEXT: [[ARRAYIDX189:%.*]] = getelementptr inbounds double, double* [[TMP121]], i32 [[TMP122]]
// CHECK15-NEXT: [[TMP123:%.*]] = load double, double* [[ARRAYIDX189]], align 4, !llvm.access.group !22
// CHECK15-NEXT: [[ADD190:%.*]] = fadd double [[TMP120]], [[TMP123]]
// CHECK15-NEXT: [[TMP124:%.*]] = load double*, double** [[A]], align 4, !llvm.access.group !22
// CHECK15-NEXT: [[TMP125:%.*]] = load i32, i32* [[I182]], align 4, !llvm.access.group !22
// CHECK15-NEXT: [[ARRAYIDX191:%.*]] = getelementptr inbounds double, double* [[TMP124]], i32 [[TMP125]]
// CHECK15-NEXT: store double [[ADD190]], double* [[ARRAYIDX191]], align 4, !llvm.access.group !22
// CHECK15-NEXT: br label [[OMP_BODY_CONTINUE192:%.*]]
// CHECK15: omp.body.continue192:
// CHECK15-NEXT: br label [[OMP_INNER_FOR_INC193:%.*]]
// CHECK15: omp.inner.for.inc193:
// CHECK15-NEXT: [[TMP126:%.*]] = load i32, i32* [[DOTOMP_IV181]], align 4, !llvm.access.group !22
// CHECK15-NEXT: [[ADD194:%.*]] = add nsw i32 [[TMP126]], 1
// CHECK15-NEXT: store i32 [[ADD194]], i32* [[DOTOMP_IV181]], align 4, !llvm.access.group !22
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND183]], !llvm.loop [[LOOP23:![0-9]+]]
// CHECK15: omp.inner.for.end195:
// CHECK15-NEXT: [[TMP127:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_171]], align 4
// CHECK15-NEXT: [[SUB196:%.*]] = sub nsw i32 [[TMP127]], 0
// CHECK15-NEXT: [[DIV197:%.*]] = sdiv i32 [[SUB196]], 1
// CHECK15-NEXT: [[MUL198:%.*]] = mul nsw i32 [[DIV197]], 1
// CHECK15-NEXT: [[ADD199:%.*]] = add nsw i32 0, [[MUL198]]
// CHECK15-NEXT: store i32 [[ADD199]], i32* [[I182]], align 4
// CHECK15-NEXT: br label [[SIMD_IF_END200]]
// CHECK15: simd.if.end200:
// CHECK15-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiET_v()
// CHECK15-NEXT: ret i32 [[CALL]]
//
//
// CHECK15-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
// CHECK15-SAME: () #[[ATTR1:[0-9]+]] comdat {
// CHECK15-NEXT: entry:
// CHECK15-NEXT: [[A:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[B:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[C:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[N:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[CH:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[_TMP13:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_14:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_15:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_LB19:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_UB20:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[I21:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_IV24:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[I25:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[_TMP44:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_45:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_46:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_LB50:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_UB51:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[I52:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_IV55:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[I56:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[_TMP75:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_76:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_77:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_LB81:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_UB82:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[I83:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_IV86:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[I87:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_106:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[_TMP107:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_108:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_109:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_LB113:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_UB114:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[I115:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_IV118:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[I119:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[_TMP138:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_139:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_140:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_LB144:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_UB145:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[I146:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_IV149:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[I150:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_169:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[_TMP170:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_171:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_172:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_LB176:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_UB177:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[I178:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_IV181:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[I182:%.*]] = alloca i32, align 4
// CHECK15-NEXT: store i32 10000, i32* [[N]], align 4
// CHECK15-NEXT: store i32 100, i32* [[CH]], align 4
// CHECK15-NEXT: [[TMP0:%.*]] = load i32, i32* [[N]], align 4
// CHECK15-NEXT: store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK15-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK15-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], 0
// CHECK15-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK15-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK15-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK15-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK15-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: store i32 0, i32* [[I]], align 4
// CHECK15-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK15-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
// CHECK15-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
// CHECK15: simd.if.then:
// CHECK15-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK15-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK15: omp.inner.for.cond:
// CHECK15-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
// CHECK15-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !25
// CHECK15-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK15-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK15: omp.inner.for.body:
// CHECK15-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
// CHECK15-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK15-NEXT: store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !25
// CHECK15-NEXT: [[TMP8:%.*]] = load i32*, i32** [[B]], align 4, !llvm.access.group !25
// CHECK15-NEXT: [[TMP9:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !25
// CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP8]], i32 [[TMP9]]
// CHECK15-NEXT: [[TMP10:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !25
// CHECK15-NEXT: [[TMP11:%.*]] = load i32*, i32** [[C]], align 4, !llvm.access.group !25
// CHECK15-NEXT: [[TMP12:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !25
// CHECK15-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, i32* [[TMP11]], i32 [[TMP12]]
// CHECK15-NEXT: [[TMP13:%.*]] = load i32, i32* [[ARRAYIDX5]], align 4, !llvm.access.group !25
// CHECK15-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP10]], [[TMP13]]
// CHECK15-NEXT: [[TMP14:%.*]] = load i32*, i32** [[A]], align 4, !llvm.access.group !25
// CHECK15-NEXT: [[TMP15:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !25
// CHECK15-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i32, i32* [[TMP14]], i32 [[TMP15]]
// CHECK15-NEXT: store i32 [[ADD6]], i32* [[ARRAYIDX7]], align 4, !llvm.access.group !25
// CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK15: omp.body.continue:
// CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK15: omp.inner.for.inc:
// CHECK15-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
// CHECK15-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP16]], 1
// CHECK15-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
// CHECK15: omp.inner.for.end:
// CHECK15-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK15-NEXT: [[SUB9:%.*]] = sub nsw i32 [[TMP17]], 0
// CHECK15-NEXT: [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
// CHECK15-NEXT: [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
// CHECK15-NEXT: [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
// CHECK15-NEXT: store i32 [[ADD12]], i32* [[I3]], align 4
// CHECK15-NEXT: br label [[SIMD_IF_END]]
// CHECK15: simd.if.end:
// CHECK15-NEXT: [[TMP18:%.*]] = load i32, i32* [[N]], align 4
// CHECK15-NEXT: store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR_14]], align 4
// CHECK15-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_14]], align 4
// CHECK15-NEXT: [[SUB16:%.*]] = sub nsw i32 [[TMP19]], 0
// CHECK15-NEXT: [[DIV17:%.*]] = sdiv i32 [[SUB16]], 1
// CHECK15-NEXT: [[SUB18:%.*]] = sub nsw i32 [[DIV17]], 1
// CHECK15-NEXT: store i32 [[SUB18]], i32* [[DOTCAPTURE_EXPR_15]], align 4
// CHECK15-NEXT: store i32 0, i32* [[DOTOMP_LB19]], align 4
// CHECK15-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_15]], align 4
// CHECK15-NEXT: store i32 [[TMP20]], i32* [[DOTOMP_UB20]], align 4
// CHECK15-NEXT: store i32 0, i32* [[I21]], align 4
// CHECK15-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_14]], align 4
// CHECK15-NEXT: [[CMP22:%.*]] = icmp slt i32 0, [[TMP21]]
// CHECK15-NEXT: br i1 [[CMP22]], label [[SIMD_IF_THEN23:%.*]], label [[SIMD_IF_END43:%.*]]
// CHECK15: simd.if.then23:
// CHECK15-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_LB19]], align 4
// CHECK15-NEXT: store i32 [[TMP22]], i32* [[DOTOMP_IV24]], align 4
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND26:%.*]]
// CHECK15: omp.inner.for.cond26:
// CHECK15-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV24]], align 4, !llvm.access.group !28
// CHECK15-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_UB20]], align 4, !llvm.access.group !28
// CHECK15-NEXT: [[CMP27:%.*]] = icmp sle i32 [[TMP23]], [[TMP24]]
// CHECK15-NEXT: br i1 [[CMP27]], label [[OMP_INNER_FOR_BODY28:%.*]], label [[OMP_INNER_FOR_END38:%.*]]
// CHECK15: omp.inner.for.body28:
// CHECK15-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV24]], align 4, !llvm.access.group !28
// CHECK15-NEXT: [[MUL29:%.*]] = mul nsw i32 [[TMP25]], 1
// CHECK15-NEXT: [[ADD30:%.*]] = add nsw i32 0, [[MUL29]]
// CHECK15-NEXT: store i32 [[ADD30]], i32* [[I25]], align 4, !llvm.access.group !28
// CHECK15-NEXT: [[TMP26:%.*]] = load i32*, i32** [[B]], align 4, !llvm.access.group !28
// CHECK15-NEXT: [[TMP27:%.*]] = load i32, i32* [[I25]], align 4, !llvm.access.group !28
// CHECK15-NEXT: [[ARRAYIDX31:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i32 [[TMP27]]
// CHECK15-NEXT: [[TMP28:%.*]] = load i32, i32* [[ARRAYIDX31]], align 4, !llvm.access.group !28
// CHECK15-NEXT: [[TMP29:%.*]] = load i32*, i32** [[C]], align 4, !llvm.access.group !28
// CHECK15-NEXT: [[TMP30:%.*]] = load i32, i32* [[I25]], align 4, !llvm.access.group !28
// CHECK15-NEXT: [[ARRAYIDX32:%.*]] = getelementptr inbounds i32, i32* [[TMP29]], i32 [[TMP30]]
// CHECK15-NEXT: [[TMP31:%.*]] = load i32, i32* [[ARRAYIDX32]], align 4, !llvm.access.group !28
// CHECK15-NEXT: [[ADD33:%.*]] = add nsw i32 [[TMP28]], [[TMP31]]
// CHECK15-NEXT: [[TMP32:%.*]] = load i32*, i32** [[A]], align 4, !llvm.access.group !28
// CHECK15-NEXT: [[TMP33:%.*]] = load i32, i32* [[I25]], align 4, !llvm.access.group !28
// CHECK15-NEXT: [[ARRAYIDX34:%.*]] = getelementptr inbounds i32, i32* [[TMP32]], i32 [[TMP33]]
// CHECK15-NEXT: store i32 [[ADD33]], i32* [[ARRAYIDX34]], align 4, !llvm.access.group !28
// CHECK15-NEXT: br label [[OMP_BODY_CONTINUE35:%.*]]
// CHECK15: omp.body.continue35:
// CHECK15-NEXT: br label [[OMP_INNER_FOR_INC36:%.*]]
// CHECK15: omp.inner.for.inc36:
// CHECK15-NEXT: [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV24]], align 4, !llvm.access.group !28
// CHECK15-NEXT: [[ADD37:%.*]] = add nsw i32 [[TMP34]], 1
// CHECK15-NEXT: store i32 [[ADD37]], i32* [[DOTOMP_IV24]], align 4, !llvm.access.group !28
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND26]], !llvm.loop [[LOOP29:![0-9]+]]
// CHECK15: omp.inner.for.end38:
// CHECK15-NEXT: [[TMP35:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_14]], align 4
// CHECK15-NEXT: [[SUB39:%.*]] = sub nsw i32 [[TMP35]], 0
// CHECK15-NEXT: [[DIV40:%.*]] = sdiv i32 [[SUB39]], 1
// CHECK15-NEXT: [[MUL41:%.*]] = mul nsw i32 [[DIV40]], 1
// CHECK15-NEXT: [[ADD42:%.*]] = add nsw i32 0, [[MUL41]]
// CHECK15-NEXT: store i32 [[ADD42]], i32* [[I25]], align 4
// CHECK15-NEXT: br label [[SIMD_IF_END43]]
// CHECK15: simd.if.end43:
// CHECK15-NEXT: [[TMP36:%.*]] = load i32, i32* [[N]], align 4
// CHECK15-NEXT: store i32 [[TMP36]], i32* [[DOTCAPTURE_EXPR_45]], align 4
// CHECK15-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_45]], align 4
// CHECK15-NEXT: [[SUB47:%.*]] = sub nsw i32 [[TMP37]], 0
// CHECK15-NEXT: [[DIV48:%.*]] = sdiv i32 [[SUB47]], 1
// CHECK15-NEXT: [[SUB49:%.*]] = sub nsw i32 [[DIV48]], 1
// CHECK15-NEXT: store i32 [[SUB49]], i32* [[DOTCAPTURE_EXPR_46]], align 4
// CHECK15-NEXT: store i32 0, i32* [[DOTOMP_LB50]], align 4
// CHECK15-NEXT: [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_46]], align 4
// CHECK15-NEXT: store i32 [[TMP38]], i32* [[DOTOMP_UB51]], align 4
// CHECK15-NEXT: store i32 0, i32* [[I52]], align 4
// CHECK15-NEXT: [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_45]], align 4
// CHECK15-NEXT: [[CMP53:%.*]] = icmp slt i32 0, [[TMP39]]
// CHECK15-NEXT: br i1 [[CMP53]], label [[SIMD_IF_THEN54:%.*]], label [[SIMD_IF_END74:%.*]]
// CHECK15: simd.if.then54:
// CHECK15-NEXT: [[TMP40:%.*]] = load i32, i32* [[DOTOMP_LB50]], align 4
// CHECK15-NEXT: store i32 [[TMP40]], i32* [[DOTOMP_IV55]], align 4
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND57:%.*]]
// CHECK15: omp.inner.for.cond57:
// CHECK15-NEXT: [[TMP41:%.*]] = load i32, i32* [[DOTOMP_IV55]], align 4, !llvm.access.group !31
// CHECK15-NEXT: [[TMP42:%.*]] = load i32, i32* [[DOTOMP_UB51]], align 4, !llvm.access.group !31
// CHECK15-NEXT: [[CMP58:%.*]] = icmp sle i32 [[TMP41]], [[TMP42]]
// CHECK15-NEXT: br i1 [[CMP58]], label [[OMP_INNER_FOR_BODY59:%.*]], label [[OMP_INNER_FOR_END69:%.*]]
// CHECK15: omp.inner.for.body59:
// CHECK15-NEXT: [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV55]], align 4, !llvm.access.group !31
// CHECK15-NEXT: [[MUL60:%.*]] = mul nsw i32 [[TMP43]], 1
// CHECK15-NEXT: [[ADD61:%.*]] = add nsw i32 0, [[MUL60]]
// CHECK15-NEXT: store i32 [[ADD61]], i32* [[I56]], align 4, !llvm.access.group !31
// CHECK15-NEXT: [[TMP44:%.*]] = load i32*, i32** [[B]], align 4, !llvm.access.group !31
// CHECK15-NEXT: [[TMP45:%.*]] = load i32, i32* [[I56]], align 4, !llvm.access.group !31
// CHECK15-NEXT: [[ARRAYIDX62:%.*]] = getelementptr inbounds i32, i32* [[TMP44]], i32 [[TMP45]]
// CHECK15-NEXT: [[TMP46:%.*]] = load i32, i32* [[ARRAYIDX62]], align 4, !llvm.access.group !31
// CHECK15-NEXT: [[TMP47:%.*]] = load i32*, i32** [[C]], align 4, !llvm.access.group !31
// CHECK15-NEXT: [[TMP48:%.*]] = load i32, i32* [[I56]], align 4, !llvm.access.group !31
// CHECK15-NEXT: [[ARRAYIDX63:%.*]] = getelementptr inbounds i32, i32* [[TMP47]], i32 [[TMP48]]
// CHECK15-NEXT: [[TMP49:%.*]] = load i32, i32* [[ARRAYIDX63]], align 4, !llvm.access.group !31
// CHECK15-NEXT: [[ADD64:%.*]] = add nsw i32 [[TMP46]], [[TMP49]]
// CHECK15-NEXT: [[TMP50:%.*]] = load i32*, i32** [[A]], align 4, !llvm.access.group !31
// CHECK15-NEXT: [[TMP51:%.*]] = load i32, i32* [[I56]], align 4, !llvm.access.group !31
// CHECK15-NEXT: [[ARRAYIDX65:%.*]] = getelementptr inbounds i32, i32* [[TMP50]], i32 [[TMP51]]
// CHECK15-NEXT: store i32 [[ADD64]], i32* [[ARRAYIDX65]], align 4, !llvm.access.group !31
// CHECK15-NEXT: br label [[OMP_BODY_CONTINUE66:%.*]]
// CHECK15: omp.body.continue66:
// CHECK15-NEXT: br label [[OMP_INNER_FOR_INC67:%.*]]
// CHECK15: omp.inner.for.inc67:
// CHECK15-NEXT: [[TMP52:%.*]] = load i32, i32* [[DOTOMP_IV55]], align 4, !llvm.access.group !31
// CHECK15-NEXT: [[ADD68:%.*]] = add nsw i32 [[TMP52]], 1
// CHECK15-NEXT: store i32 [[ADD68]], i32* [[DOTOMP_IV55]], align 4, !llvm.access.group !31
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND57]], !llvm.loop [[LOOP32:![0-9]+]]
// CHECK15: omp.inner.for.end69:
// CHECK15-NEXT: [[TMP53:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_45]], align 4
// CHECK15-NEXT: [[SUB70:%.*]] = sub nsw i32 [[TMP53]], 0
// CHECK15-NEXT: [[DIV71:%.*]] = sdiv i32 [[SUB70]], 1
// CHECK15-NEXT: [[MUL72:%.*]] = mul nsw i32 [[DIV71]], 1
// CHECK15-NEXT: [[ADD73:%.*]] = add nsw i32 0, [[MUL72]]
// CHECK15-NEXT: store i32 [[ADD73]], i32* [[I56]], align 4
// CHECK15-NEXT: br label [[SIMD_IF_END74]]
// CHECK15: simd.if.end74:
// CHECK15-NEXT: [[TMP54:%.*]] = load i32, i32* [[N]], align 4
// CHECK15-NEXT: store i32 [[TMP54]], i32* [[DOTCAPTURE_EXPR_76]], align 4
// CHECK15-NEXT: [[TMP55:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_76]], align 4
// CHECK15-NEXT: [[SUB78:%.*]] = sub nsw i32 [[TMP55]], 0
// CHECK15-NEXT: [[DIV79:%.*]] = sdiv i32 [[SUB78]], 1
// CHECK15-NEXT: [[SUB80:%.*]] = sub nsw i32 [[DIV79]], 1
// CHECK15-NEXT: store i32 [[SUB80]], i32* [[DOTCAPTURE_EXPR_77]], align 4
// CHECK15-NEXT: store i32 0, i32* [[DOTOMP_LB81]], align 4
// CHECK15-NEXT: [[TMP56:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_77]], align 4
// CHECK15-NEXT: store i32 [[TMP56]], i32* [[DOTOMP_UB82]], align 4
// CHECK15-NEXT: store i32 0, i32* [[I83]], align 4
// CHECK15-NEXT: [[TMP57:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_76]], align 4
// CHECK15-NEXT: [[CMP84:%.*]] = icmp slt i32 0, [[TMP57]]
// CHECK15-NEXT: br i1 [[CMP84]], label [[SIMD_IF_THEN85:%.*]], label [[SIMD_IF_END105:%.*]]
// CHECK15: simd.if.then85:
// CHECK15-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTOMP_LB81]], align 4
// CHECK15-NEXT: store i32 [[TMP58]], i32* [[DOTOMP_IV86]], align 4
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND88:%.*]]
// CHECK15: omp.inner.for.cond88:
// CHECK15-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTOMP_IV86]], align 4, !llvm.access.group !34
// CHECK15-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTOMP_UB82]], align 4, !llvm.access.group !34
// CHECK15-NEXT: [[CMP89:%.*]] = icmp sle i32 [[TMP59]], [[TMP60]]
// CHECK15-NEXT: br i1 [[CMP89]], label [[OMP_INNER_FOR_BODY90:%.*]], label [[OMP_INNER_FOR_END100:%.*]]
// CHECK15: omp.inner.for.body90:
// CHECK15-NEXT: [[TMP61:%.*]] = load i32, i32* [[DOTOMP_IV86]], align 4, !llvm.access.group !34
// CHECK15-NEXT: [[MUL91:%.*]] = mul nsw i32 [[TMP61]], 1
// CHECK15-NEXT: [[ADD92:%.*]] = add nsw i32 0, [[MUL91]]
// CHECK15-NEXT: store i32 [[ADD92]], i32* [[I87]], align 4, !llvm.access.group !34
// CHECK15-NEXT: [[TMP62:%.*]] = load i32*, i32** [[B]], align 4, !llvm.access.group !34
// CHECK15-NEXT: [[TMP63:%.*]] = load i32, i32* [[I87]], align 4, !llvm.access.group !34
// CHECK15-NEXT: [[ARRAYIDX93:%.*]] = getelementptr inbounds i32, i32* [[TMP62]], i32 [[TMP63]]
// CHECK15-NEXT: [[TMP64:%.*]] = load i32, i32* [[ARRAYIDX93]], align 4, !llvm.access.group !34
// CHECK15-NEXT: [[TMP65:%.*]] = load i32*, i32** [[C]], align 4, !llvm.access.group !34
// CHECK15-NEXT: [[TMP66:%.*]] = load i32, i32* [[I87]], align 4, !llvm.access.group !34
// CHECK15-NEXT: [[ARRAYIDX94:%.*]] = getelementptr inbounds i32, i32* [[TMP65]], i32 [[TMP66]]
// CHECK15-NEXT: [[TMP67:%.*]] = load i32, i32* [[ARRAYIDX94]], align 4, !llvm.access.group !34
// CHECK15-NEXT: [[ADD95:%.*]] = add nsw i32 [[TMP64]], [[TMP67]]
// CHECK15-NEXT: [[TMP68:%.*]] = load i32*, i32** [[A]], align 4, !llvm.access.group !34
// CHECK15-NEXT: [[TMP69:%.*]] = load i32, i32* [[I87]], align 4, !llvm.access.group !34
// CHECK15-NEXT: [[ARRAYIDX96:%.*]] = getelementptr inbounds i32, i32* [[TMP68]], i32 [[TMP69]]
// CHECK15-NEXT: store i32 [[ADD95]], i32* [[ARRAYIDX96]], align 4, !llvm.access.group !34
// CHECK15-NEXT: br label [[OMP_BODY_CONTINUE97:%.*]]
// CHECK15: omp.body.continue97:
// CHECK15-NEXT: br label [[OMP_INNER_FOR_INC98:%.*]]
// CHECK15: omp.inner.for.inc98:
// CHECK15-NEXT: [[TMP70:%.*]] = load i32, i32* [[DOTOMP_IV86]], align 4, !llvm.access.group !34
// CHECK15-NEXT: [[ADD99:%.*]] = add nsw i32 [[TMP70]], 1
// CHECK15-NEXT: store i32 [[ADD99]], i32* [[DOTOMP_IV86]], align 4, !llvm.access.group !34
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND88]], !llvm.loop [[LOOP35:![0-9]+]]
// CHECK15: omp.inner.for.end100:
// CHECK15-NEXT: [[TMP71:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_76]], align 4
// CHECK15-NEXT: [[SUB101:%.*]] = sub nsw i32 [[TMP71]], 0
// CHECK15-NEXT: [[DIV102:%.*]] = sdiv i32 [[SUB101]], 1
// CHECK15-NEXT: [[MUL103:%.*]] = mul nsw i32 [[DIV102]], 1
// CHECK15-NEXT: [[ADD104:%.*]] = add nsw i32 0, [[MUL103]]
// CHECK15-NEXT: store i32 [[ADD104]], i32* [[I87]], align 4
// CHECK15-NEXT: br label [[SIMD_IF_END105]]
// CHECK15: simd.if.end105:
// CHECK15-NEXT: [[TMP72:%.*]] = load i32, i32* [[CH]], align 4
// CHECK15-NEXT: store i32 [[TMP72]], i32* [[DOTCAPTURE_EXPR_106]], align 4
// CHECK15-NEXT: [[TMP73:%.*]] = load i32, i32* [[N]], align 4
// CHECK15-NEXT: store i32 [[TMP73]], i32* [[DOTCAPTURE_EXPR_108]], align 4
// CHECK15-NEXT: [[TMP74:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_108]], align 4
// CHECK15-NEXT: [[SUB110:%.*]] = sub nsw i32 [[TMP74]], 0
// CHECK15-NEXT: [[DIV111:%.*]] = sdiv i32 [[SUB110]], 1
// CHECK15-NEXT: [[SUB112:%.*]] = sub nsw i32 [[DIV111]], 1
// CHECK15-NEXT: store i32 [[SUB112]], i32* [[DOTCAPTURE_EXPR_109]], align 4
// CHECK15-NEXT: store i32 0, i32* [[DOTOMP_LB113]], align 4
// CHECK15-NEXT: [[TMP75:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_109]], align 4
// CHECK15-NEXT: store i32 [[TMP75]], i32* [[DOTOMP_UB114]], align 4
// CHECK15-NEXT: store i32 0, i32* [[I115]], align 4
// CHECK15-NEXT: [[TMP76:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_108]], align 4
// CHECK15-NEXT: [[CMP116:%.*]] = icmp slt i32 0, [[TMP76]]
// CHECK15-NEXT: br i1 [[CMP116]], label [[SIMD_IF_THEN117:%.*]], label [[SIMD_IF_END137:%.*]]
// CHECK15: simd.if.then117:
// CHECK15-NEXT: [[TMP77:%.*]] = load i32, i32* [[DOTOMP_LB113]], align 4
// CHECK15-NEXT: store i32 [[TMP77]], i32* [[DOTOMP_IV118]], align 4
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND120:%.*]]
// CHECK15: omp.inner.for.cond120:
// CHECK15-NEXT: [[TMP78:%.*]] = load i32, i32* [[DOTOMP_IV118]], align 4, !llvm.access.group !37
// CHECK15-NEXT: [[TMP79:%.*]] = load i32, i32* [[DOTOMP_UB114]], align 4, !llvm.access.group !37
// CHECK15-NEXT: [[CMP121:%.*]] = icmp sle i32 [[TMP78]], [[TMP79]]
// CHECK15-NEXT: br i1 [[CMP121]], label [[OMP_INNER_FOR_BODY122:%.*]], label [[OMP_INNER_FOR_END132:%.*]]
// CHECK15: omp.inner.for.body122:
// CHECK15-NEXT: [[TMP80:%.*]] = load i32, i32* [[DOTOMP_IV118]], align 4, !llvm.access.group !37
// CHECK15-NEXT: [[MUL123:%.*]] = mul nsw i32 [[TMP80]], 1
// CHECK15-NEXT: [[ADD124:%.*]] = add nsw i32 0, [[MUL123]]
// CHECK15-NEXT: store i32 [[ADD124]], i32* [[I119]], align 4, !llvm.access.group !37
// CHECK15-NEXT: [[TMP81:%.*]] = load i32*, i32** [[B]], align 4, !llvm.access.group !37
// CHECK15-NEXT: [[TMP82:%.*]] = load i32, i32* [[I119]], align 4, !llvm.access.group !37
// CHECK15-NEXT: [[ARRAYIDX125:%.*]] = getelementptr inbounds i32, i32* [[TMP81]], i32 [[TMP82]]
// CHECK15-NEXT: [[TMP83:%.*]] = load i32, i32* [[ARRAYIDX125]], align 4, !llvm.access.group !37
// CHECK15-NEXT: [[TMP84:%.*]] = load i32*, i32** [[C]], align 4, !llvm.access.group !37
// CHECK15-NEXT: [[TMP85:%.*]] = load i32, i32* [[I119]], align 4, !llvm.access.group !37
// CHECK15-NEXT: [[ARRAYIDX126:%.*]] = getelementptr inbounds i32, i32* [[TMP84]], i32 [[TMP85]]
// CHECK15-NEXT: [[TMP86:%.*]] = load i32, i32* [[ARRAYIDX126]], align 4, !llvm.access.group !37
// CHECK15-NEXT: [[ADD127:%.*]] = add nsw i32 [[TMP83]], [[TMP86]]
// CHECK15-NEXT: [[TMP87:%.*]] = load i32*, i32** [[A]], align 4, !llvm.access.group !37
// CHECK15-NEXT: [[TMP88:%.*]] = load i32, i32* [[I119]], align 4, !llvm.access.group !37
// CHECK15-NEXT: [[ARRAYIDX128:%.*]] = getelementptr inbounds i32, i32* [[TMP87]], i32 [[TMP88]]
// CHECK15-NEXT: store i32 [[ADD127]], i32* [[ARRAYIDX128]], align 4, !llvm.access.group !37
// CHECK15-NEXT: br label [[OMP_BODY_CONTINUE129:%.*]]
// CHECK15: omp.body.continue129:
// CHECK15-NEXT: br label [[OMP_INNER_FOR_INC130:%.*]]
// CHECK15: omp.inner.for.inc130:
// CHECK15-NEXT: [[TMP89:%.*]] = load i32, i32* [[DOTOMP_IV118]], align 4, !llvm.access.group !37
// CHECK15-NEXT: [[ADD131:%.*]] = add nsw i32 [[TMP89]], 1
// CHECK15-NEXT: store i32 [[ADD131]], i32* [[DOTOMP_IV118]], align 4, !llvm.access.group !37
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND120]], !llvm.loop [[LOOP38:![0-9]+]]
// CHECK15: omp.inner.for.end132:
// CHECK15-NEXT: [[TMP90:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_108]], align 4
// CHECK15-NEXT: [[SUB133:%.*]] = sub nsw i32 [[TMP90]], 0
// CHECK15-NEXT: [[DIV134:%.*]] = sdiv i32 [[SUB133]], 1
// CHECK15-NEXT: [[MUL135:%.*]] = mul nsw i32 [[DIV134]], 1
// CHECK15-NEXT: [[ADD136:%.*]] = add nsw i32 0, [[MUL135]]
// CHECK15-NEXT: store i32 [[ADD136]], i32* [[I119]], align 4
// CHECK15-NEXT: br label [[SIMD_IF_END137]]
// CHECK15: simd.if.end137:
// CHECK15-NEXT: [[TMP91:%.*]] = load i32, i32* [[N]], align 4
// CHECK15-NEXT: store i32 [[TMP91]], i32* [[DOTCAPTURE_EXPR_139]], align 4
// CHECK15-NEXT: [[TMP92:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_139]], align 4
// CHECK15-NEXT: [[SUB141:%.*]] = sub nsw i32 [[TMP92]], 0
// CHECK15-NEXT: [[DIV142:%.*]] = sdiv i32 [[SUB141]], 1
// CHECK15-NEXT: [[SUB143:%.*]] = sub nsw i32 [[DIV142]], 1
// CHECK15-NEXT: store i32 [[SUB143]], i32* [[DOTCAPTURE_EXPR_140]], align 4
// CHECK15-NEXT: store i32 0, i32* [[DOTOMP_LB144]], align 4
// CHECK15-NEXT: [[TMP93:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_140]], align 4
// CHECK15-NEXT: store i32 [[TMP93]], i32* [[DOTOMP_UB145]], align 4
// CHECK15-NEXT: store i32 0, i32* [[I146]], align 4
// CHECK15-NEXT: [[TMP94:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_139]], align 4
// CHECK15-NEXT: [[CMP147:%.*]] = icmp slt i32 0, [[TMP94]]
// CHECK15-NEXT: br i1 [[CMP147]], label [[SIMD_IF_THEN148:%.*]], label [[SIMD_IF_END168:%.*]]
// CHECK15: simd.if.then148:
// CHECK15-NEXT: [[TMP95:%.*]] = load i32, i32* [[DOTOMP_LB144]], align 4
// CHECK15-NEXT: store i32 [[TMP95]], i32* [[DOTOMP_IV149]], align 4
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND151:%.*]]
// CHECK15: omp.inner.for.cond151:
// CHECK15-NEXT: [[TMP96:%.*]] = load i32, i32* [[DOTOMP_IV149]], align 4, !llvm.access.group !40
// CHECK15-NEXT: [[TMP97:%.*]] = load i32, i32* [[DOTOMP_UB145]], align 4, !llvm.access.group !40
// CHECK15-NEXT: [[CMP152:%.*]] = icmp sle i32 [[TMP96]], [[TMP97]]
// CHECK15-NEXT: br i1 [[CMP152]], label [[OMP_INNER_FOR_BODY153:%.*]], label [[OMP_INNER_FOR_END163:%.*]]
// CHECK15: omp.inner.for.body153:
// CHECK15-NEXT: [[TMP98:%.*]] = load i32, i32* [[DOTOMP_IV149]], align 4, !llvm.access.group !40
// CHECK15-NEXT: [[MUL154:%.*]] = mul nsw i32 [[TMP98]], 1
// CHECK15-NEXT: [[ADD155:%.*]] = add nsw i32 0, [[MUL154]]
// CHECK15-NEXT: store i32 [[ADD155]], i32* [[I150]], align 4, !llvm.access.group !40
// CHECK15-NEXT: [[TMP99:%.*]] = load i32*, i32** [[B]], align 4, !llvm.access.group !40
// CHECK15-NEXT: [[TMP100:%.*]] = load i32, i32* [[I150]], align 4, !llvm.access.group !40
// CHECK15-NEXT: [[ARRAYIDX156:%.*]] = getelementptr inbounds i32, i32* [[TMP99]], i32 [[TMP100]]
// CHECK15-NEXT: [[TMP101:%.*]] = load i32, i32* [[ARRAYIDX156]], align 4, !llvm.access.group !40
// CHECK15-NEXT: [[TMP102:%.*]] = load i32*, i32** [[C]], align 4, !llvm.access.group !40
// CHECK15-NEXT: [[TMP103:%.*]] = load i32, i32* [[I150]], align 4, !llvm.access.group !40
// CHECK15-NEXT: [[ARRAYIDX157:%.*]] = getelementptr inbounds i32, i32* [[TMP102]], i32 [[TMP103]]
// CHECK15-NEXT: [[TMP104:%.*]] = load i32, i32* [[ARRAYIDX157]], align 4, !llvm.access.group !40
// CHECK15-NEXT: [[ADD158:%.*]] = add nsw i32 [[TMP101]], [[TMP104]]
// CHECK15-NEXT: [[TMP105:%.*]] = load i32*, i32** [[A]], align 4, !llvm.access.group !40
// CHECK15-NEXT: [[TMP106:%.*]] = load i32, i32* [[I150]], align 4, !llvm.access.group !40
// CHECK15-NEXT: [[ARRAYIDX159:%.*]] = getelementptr inbounds i32, i32* [[TMP105]], i32 [[TMP106]]
// CHECK15-NEXT: store i32 [[ADD158]], i32* [[ARRAYIDX159]], align 4, !llvm.access.group !40
// CHECK15-NEXT: br label [[OMP_BODY_CONTINUE160:%.*]]
// CHECK15: omp.body.continue160:
// CHECK15-NEXT: br label [[OMP_INNER_FOR_INC161:%.*]]
// CHECK15: omp.inner.for.inc161:
// CHECK15-NEXT: [[TMP107:%.*]] = load i32, i32* [[DOTOMP_IV149]], align 4, !llvm.access.group !40
// CHECK15-NEXT: [[ADD162:%.*]] = add nsw i32 [[TMP107]], 1
// CHECK15-NEXT: store i32 [[ADD162]], i32* [[DOTOMP_IV149]], align 4, !llvm.access.group !40
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND151]], !llvm.loop [[LOOP41:![0-9]+]]
// CHECK15: omp.inner.for.end163:
// CHECK15-NEXT: [[TMP108:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_139]], align 4
// CHECK15-NEXT: [[SUB164:%.*]] = sub nsw i32 [[TMP108]], 0
// CHECK15-NEXT: [[DIV165:%.*]] = sdiv i32 [[SUB164]], 1
// CHECK15-NEXT: [[MUL166:%.*]] = mul nsw i32 [[DIV165]], 1
// CHECK15-NEXT: [[ADD167:%.*]] = add nsw i32 0, [[MUL166]]
// CHECK15-NEXT: store i32 [[ADD167]], i32* [[I150]], align 4
// CHECK15-NEXT: br label [[SIMD_IF_END168]]
// CHECK15: simd.if.end168:
// CHECK15-NEXT: [[TMP109:%.*]] = load i32, i32* [[CH]], align 4
// CHECK15-NEXT: store i32 [[TMP109]], i32* [[DOTCAPTURE_EXPR_169]], align 4
// CHECK15-NEXT: [[TMP110:%.*]] = load i32, i32* [[N]], align 4
// CHECK15-NEXT: store i32 [[TMP110]], i32* [[DOTCAPTURE_EXPR_171]], align 4
// CHECK15-NEXT: [[TMP111:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_171]], align 4
// CHECK15-NEXT: [[SUB173:%.*]] = sub nsw i32 [[TMP111]], 0
// CHECK15-NEXT: [[DIV174:%.*]] = sdiv i32 [[SUB173]], 1
// CHECK15-NEXT: [[SUB175:%.*]] = sub nsw i32 [[DIV174]], 1
// CHECK15-NEXT: store i32 [[SUB175]], i32* [[DOTCAPTURE_EXPR_172]], align 4
// CHECK15-NEXT: store i32 0, i32* [[DOTOMP_LB176]], align 4
// CHECK15-NEXT: [[TMP112:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_172]], align 4
// CHECK15-NEXT: store i32 [[TMP112]], i32* [[DOTOMP_UB177]], align 4
// CHECK15-NEXT: store i32 0, i32* [[I178]], align 4
// CHECK15-NEXT: [[TMP113:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_171]], align 4
// CHECK15-NEXT: [[CMP179:%.*]] = icmp slt i32 0, [[TMP113]]
// CHECK15-NEXT: br i1 [[CMP179]], label [[SIMD_IF_THEN180:%.*]], label [[SIMD_IF_END200:%.*]]
// CHECK15: simd.if.then180:
// CHECK15-NEXT: [[TMP114:%.*]] = load i32, i32* [[DOTOMP_LB176]], align 4
// CHECK15-NEXT: store i32 [[TMP114]], i32* [[DOTOMP_IV181]], align 4
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND183:%.*]]
// CHECK15: omp.inner.for.cond183:
// CHECK15-NEXT: [[TMP115:%.*]] = load i32, i32* [[DOTOMP_IV181]], align 4, !llvm.access.group !43
// CHECK15-NEXT: [[TMP116:%.*]] = load i32, i32* [[DOTOMP_UB177]], align 4, !llvm.access.group !43
// CHECK15-NEXT: [[CMP184:%.*]] = icmp sle i32 [[TMP115]], [[TMP116]]
// CHECK15-NEXT: br i1 [[CMP184]], label [[OMP_INNER_FOR_BODY185:%.*]], label [[OMP_INNER_FOR_END195:%.*]]
// CHECK15: omp.inner.for.body185:
// CHECK15-NEXT: [[TMP117:%.*]] = load i32, i32* [[DOTOMP_IV181]], align 4, !llvm.access.group !43
// CHECK15-NEXT: [[MUL186:%.*]] = mul nsw i32 [[TMP117]], 1
// CHECK15-NEXT: [[ADD187:%.*]] = add nsw i32 0, [[MUL186]]
// CHECK15-NEXT: store i32 [[ADD187]], i32* [[I182]], align 4, !llvm.access.group !43
// CHECK15-NEXT: [[TMP118:%.*]] = load i32*, i32** [[B]], align 4, !llvm.access.group !43
// CHECK15-NEXT: [[TMP119:%.*]] = load i32, i32* [[I182]], align 4, !llvm.access.group !43
// CHECK15-NEXT: [[ARRAYIDX188:%.*]] = getelementptr inbounds i32, i32* [[TMP118]], i32 [[TMP119]]
// CHECK15-NEXT: [[TMP120:%.*]] = load i32, i32* [[ARRAYIDX188]], align 4, !llvm.access.group !43
// CHECK15-NEXT: [[TMP121:%.*]] = load i32*, i32** [[C]], align 4, !llvm.access.group !43
// CHECK15-NEXT: [[TMP122:%.*]] = load i32, i32* [[I182]], align 4, !llvm.access.group !43
// CHECK15-NEXT: [[ARRAYIDX189:%.*]] = getelementptr inbounds i32, i32* [[TMP121]], i32 [[TMP122]]
// CHECK15-NEXT: [[TMP123:%.*]] = load i32, i32* [[ARRAYIDX189]], align 4, !llvm.access.group !43
// CHECK15-NEXT: [[ADD190:%.*]] = add nsw i32 [[TMP120]], [[TMP123]]
// CHECK15-NEXT: [[TMP124:%.*]] = load i32*, i32** [[A]], align 4, !llvm.access.group !43
// CHECK15-NEXT: [[TMP125:%.*]] = load i32, i32* [[I182]], align 4, !llvm.access.group !43
// CHECK15-NEXT: [[ARRAYIDX191:%.*]] = getelementptr inbounds i32, i32* [[TMP124]], i32 [[TMP125]]
// CHECK15-NEXT: store i32 [[ADD190]], i32* [[ARRAYIDX191]], align 4, !llvm.access.group !43
// CHECK15-NEXT: br label [[OMP_BODY_CONTINUE192:%.*]]
// CHECK15: omp.body.continue192:
// CHECK15-NEXT: br label [[OMP_INNER_FOR_INC193:%.*]]
// CHECK15: omp.inner.for.inc193:
// CHECK15-NEXT: [[TMP126:%.*]] = load i32, i32* [[DOTOMP_IV181]], align 4, !llvm.access.group !43
// CHECK15-NEXT: [[ADD194:%.*]] = add nsw i32 [[TMP126]], 1
// CHECK15-NEXT: store i32 [[ADD194]], i32* [[DOTOMP_IV181]], align 4, !llvm.access.group !43
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND183]], !llvm.loop [[LOOP44:![0-9]+]]
// CHECK15: omp.inner.for.end195:
// CHECK15-NEXT: [[TMP127:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_171]], align 4
// CHECK15-NEXT: [[SUB196:%.*]] = sub nsw i32 [[TMP127]], 0
// CHECK15-NEXT: [[DIV197:%.*]] = sdiv i32 [[SUB196]], 1
// CHECK15-NEXT: [[MUL198:%.*]] = mul nsw i32 [[DIV197]], 1
// CHECK15-NEXT: [[ADD199:%.*]] = add nsw i32 0, [[MUL198]]
// CHECK15-NEXT: store i32 [[ADD199]], i32* [[I182]], align 4
// CHECK15-NEXT: br label [[SIMD_IF_END200]]
// CHECK15: simd.if.end200:
// CHECK15-NEXT: ret i32 0
//