1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --include-generated-funcs
2 // RUN: %clang_cc1 -no-opaque-pointers -verify -triple x86_64-apple-darwin10 -target-cpu core2 -fopenmp -fopenmp-version=51 -x c -emit-llvm %s -o - | FileCheck %s
3 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=51 -x c -triple x86_64-apple-darwin10 -target-cpu core2 -emit-pch -o %t %s
4 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=51 -x c -triple x86_64-apple-darwin10 -target-cpu core2 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
5 
6 // RUN: %clang_cc1 -no-opaque-pointers -verify -triple x86_64-apple-darwin10 -target-cpu core2 -fopenmp-simd -fopenmp-version=51 -x c -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
7 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=51 -x c -triple x86_64-apple-darwin10 -target-cpu core2 -emit-pch -o %t %s
8 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=51 -x c -triple x86_64-apple-darwin10 -target-cpu core2 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
9 // SIMD-ONLY0-NOT: {{__kmpc|__tgt}}
10 // expected-no-diagnostics
11 
12 #ifndef HEADER
13 #define HEADER
14 
15 void foo(void) {
16   char cx, ce, cd;
17   unsigned char ucx, uce, ucd;
18   short sx, se, sd;
19   unsigned short usx, use, usd;
20   int ix, ie, id;
21   unsigned int uix, uie, uid;
22   long lx, le, ld;
23   unsigned long ulx, ule, uld;
24   long long llx, lle, lld;
25   unsigned long long ullx, ulle, ulld;
26 
27 #pragma omp atomic compare
28   cx = cx > ce ? ce : cx;
29 #pragma omp atomic compare
30   cx = cx < ce ? ce : cx;
31 #pragma omp atomic compare
32   cx = ce > cx ? ce : cx;
33 #pragma omp atomic compare
34   cx = ce < cx ? ce : cx;
35 #pragma omp atomic compare
36   if (cx > ce)
37     cx = ce;
38 #pragma omp atomic compare
39   if (cx < ce)
40     cx = ce;
41 #pragma omp atomic compare
42   if (ce > cx)
43     cx = ce;
44 #pragma omp atomic compare
45   if (ce < cx)
46     cx = ce;
47 
48 #pragma omp atomic compare
49   cx = cx == ce ? cd : cx;
50 #pragma omp atomic compare
51   cx = ce == cx ? cd : cx;
52 #pragma omp atomic compare
53   if (cx == ce)
54     cx = cd;
55 #pragma omp atomic compare
56   if (ce == cx)
57     cx = cd;
58 
59 #pragma omp atomic compare
60   ucx = ucx > uce ? uce : ucx;
61 #pragma omp atomic compare
62   ucx = ucx < uce ? uce : ucx;
63 #pragma omp atomic compare
64   ucx = uce > ucx ? uce : ucx;
65 #pragma omp atomic compare
66   ucx = uce < ucx ? uce : ucx;
67 #pragma omp atomic compare
68   if (ucx > uce)
69     ucx = uce;
70 #pragma omp atomic compare
71   if (ucx < uce)
72     ucx = uce;
73 #pragma omp atomic compare
74   if (uce > ucx)
75     ucx = uce;
76 #pragma omp atomic compare
77   if (uce < ucx)
78     ucx = uce;
79 
80 #pragma omp atomic compare
81   ucx = ucx == uce ? ucd : ucx;
82 #pragma omp atomic compare
83   ucx = uce == ucx ? ucd : ucx;
84 #pragma omp atomic compare
85   if (ucx == uce)
86     ucx = ucd;
87 #pragma omp atomic compare
88   if (uce == ucx)
89     ucx = ucd;
90 
91 #pragma omp atomic compare acq_rel
92   cx = cx > ce ? ce : cx;
93 #pragma omp atomic compare acq_rel
94   cx = cx < ce ? ce : cx;
95 #pragma omp atomic compare acq_rel
96   cx = ce > cx ? ce : cx;
97 #pragma omp atomic compare acq_rel
98   cx = ce < cx ? ce : cx;
99 #pragma omp atomic compare acq_rel
100   if (cx > ce)
101     cx = ce;
102 #pragma omp atomic compare acq_rel
103   if (cx < ce)
104     cx = ce;
105 #pragma omp atomic compare acq_rel
106   if (ce > cx)
107     cx = ce;
108 #pragma omp atomic compare acq_rel
109   if (ce < cx)
110     cx = ce;
111 
112 #pragma omp atomic compare acq_rel
113   cx = cx == ce ? cd : cx;
114 #pragma omp atomic compare acq_rel
115   cx = ce == cx ? cd : cx;
116 #pragma omp atomic compare acq_rel
117   if (cx == ce)
118     cx = cd;
119 #pragma omp atomic compare acq_rel
120   if (ce == cx)
121     cx = cd;
122 
123 #pragma omp atomic compare acq_rel
124   ucx = ucx > uce ? uce : ucx;
125 #pragma omp atomic compare acq_rel
126   ucx = ucx < uce ? uce : ucx;
127 #pragma omp atomic compare acq_rel
128   ucx = uce > ucx ? uce : ucx;
129 #pragma omp atomic compare acq_rel
130   ucx = uce < ucx ? uce : ucx;
131 #pragma omp atomic compare acq_rel
132   if (ucx > uce)
133     ucx = uce;
134 #pragma omp atomic compare acq_rel
135   if (ucx < uce)
136     ucx = uce;
137 #pragma omp atomic compare acq_rel
138   if (uce > ucx)
139     ucx = uce;
140 #pragma omp atomic compare acq_rel
141   if (uce < ucx)
142     ucx = uce;
143 
144 #pragma omp atomic compare acq_rel
145   ucx = ucx == uce ? ucd : ucx;
146 #pragma omp atomic compare acq_rel
147   ucx = uce == ucx ? ucd : ucx;
148 #pragma omp atomic compare acq_rel
149   if (ucx == uce)
150     ucx = ucd;
151 #pragma omp atomic compare acq_rel
152   if (uce == ucx)
153     ucx = ucd;
154 
155 #pragma omp atomic compare acquire
156   cx = cx > ce ? ce : cx;
157 #pragma omp atomic compare acquire
158   cx = cx < ce ? ce : cx;
159 #pragma omp atomic compare acquire
160   cx = ce > cx ? ce : cx;
161 #pragma omp atomic compare acquire
162   cx = ce < cx ? ce : cx;
163 #pragma omp atomic compare acquire
164   if (cx > ce)
165     cx = ce;
166 #pragma omp atomic compare acquire
167   if (cx < ce)
168     cx = ce;
169 #pragma omp atomic compare acquire
170   if (ce > cx)
171     cx = ce;
172 #pragma omp atomic compare acquire
173   if (ce < cx)
174     cx = ce;
175 
176 #pragma omp atomic compare acquire
177   cx = cx == ce ? cd : cx;
178 #pragma omp atomic compare acquire
179   cx = ce == cx ? cd : cx;
180 #pragma omp atomic compare acquire
181   if (cx == ce)
182     cx = cd;
183 #pragma omp atomic compare acquire
184   if (ce == cx)
185     cx = cd;
186 
187 #pragma omp atomic compare acquire
188   ucx = ucx > uce ? uce : ucx;
189 #pragma omp atomic compare acquire
190   ucx = ucx < uce ? uce : ucx;
191 #pragma omp atomic compare acquire
192   ucx = uce > ucx ? uce : ucx;
193 #pragma omp atomic compare acquire
194   ucx = uce < ucx ? uce : ucx;
195 #pragma omp atomic compare acquire
196   if (ucx > uce)
197     ucx = uce;
198 #pragma omp atomic compare acquire
199   if (ucx < uce)
200     ucx = uce;
201 #pragma omp atomic compare acquire
202   if (uce > ucx)
203     ucx = uce;
204 #pragma omp atomic compare acquire
205   if (uce < ucx)
206     ucx = uce;
207 
208 #pragma omp atomic compare acquire
209   ucx = ucx == uce ? ucd : ucx;
210 #pragma omp atomic compare acquire
211   ucx = uce == ucx ? ucd : ucx;
212 #pragma omp atomic compare acquire
213   if (ucx == uce)
214     ucx = ucd;
215 #pragma omp atomic compare acquire
216   if (uce == ucx)
217     ucx = ucd;
218 
219 #pragma omp atomic compare relaxed
220   cx = cx > ce ? ce : cx;
221 #pragma omp atomic compare relaxed
222   cx = cx < ce ? ce : cx;
223 #pragma omp atomic compare relaxed
224   cx = ce > cx ? ce : cx;
225 #pragma omp atomic compare relaxed
226   cx = ce < cx ? ce : cx;
227 #pragma omp atomic compare relaxed
228   if (cx > ce)
229     cx = ce;
230 #pragma omp atomic compare relaxed
231   if (cx < ce)
232     cx = ce;
233 #pragma omp atomic compare relaxed
234   if (ce > cx)
235     cx = ce;
236 #pragma omp atomic compare relaxed
237   if (ce < cx)
238     cx = ce;
239 
240 #pragma omp atomic compare relaxed
241   cx = cx == ce ? cd : cx;
242 #pragma omp atomic compare relaxed
243   cx = ce == cx ? cd : cx;
244 #pragma omp atomic compare relaxed
245   if (cx == ce)
246     cx = cd;
247 #pragma omp atomic compare relaxed
248   if (ce == cx)
249     cx = cd;
250 
251 #pragma omp atomic compare relaxed
252   ucx = ucx > uce ? uce : ucx;
253 #pragma omp atomic compare relaxed
254   ucx = ucx < uce ? uce : ucx;
255 #pragma omp atomic compare relaxed
256   ucx = uce > ucx ? uce : ucx;
257 #pragma omp atomic compare relaxed
258   ucx = uce < ucx ? uce : ucx;
259 #pragma omp atomic compare relaxed
260   if (ucx > uce)
261     ucx = uce;
262 #pragma omp atomic compare relaxed
263   if (ucx < uce)
264     ucx = uce;
265 #pragma omp atomic compare relaxed
266   if (uce > ucx)
267     ucx = uce;
268 #pragma omp atomic compare relaxed
269   if (uce < ucx)
270     ucx = uce;
271 
272 #pragma omp atomic compare relaxed
273   ucx = ucx == uce ? ucd : ucx;
274 #pragma omp atomic compare relaxed
275   ucx = uce == ucx ? ucd : ucx;
276 #pragma omp atomic compare relaxed
277   if (ucx == uce)
278     ucx = ucd;
279 #pragma omp atomic compare relaxed
280   if (uce == ucx)
281     ucx = ucd;
282 
283 #pragma omp atomic compare release
284   cx = cx > ce ? ce : cx;
285 #pragma omp atomic compare release
286   cx = cx < ce ? ce : cx;
287 #pragma omp atomic compare release
288   cx = ce > cx ? ce : cx;
289 #pragma omp atomic compare release
290   cx = ce < cx ? ce : cx;
291 #pragma omp atomic compare release
292   if (cx > ce)
293     cx = ce;
294 #pragma omp atomic compare release
295   if (cx < ce)
296     cx = ce;
297 #pragma omp atomic compare release
298   if (ce > cx)
299     cx = ce;
300 #pragma omp atomic compare release
301   if (ce < cx)
302     cx = ce;
303 
304 #pragma omp atomic compare release
305   cx = cx == ce ? cd : cx;
306 #pragma omp atomic compare release
307   cx = ce == cx ? cd : cx;
308 #pragma omp atomic compare release
309   if (cx == ce)
310     cx = cd;
311 #pragma omp atomic compare release
312   if (ce == cx)
313     cx = cd;
314 
315 #pragma omp atomic compare release
316   ucx = ucx > uce ? uce : ucx;
317 #pragma omp atomic compare release
318   ucx = ucx < uce ? uce : ucx;
319 #pragma omp atomic compare release
320   ucx = uce > ucx ? uce : ucx;
321 #pragma omp atomic compare release
322   ucx = uce < ucx ? uce : ucx;
323 #pragma omp atomic compare release
324   if (ucx > uce)
325     ucx = uce;
326 #pragma omp atomic compare release
327   if (ucx < uce)
328     ucx = uce;
329 #pragma omp atomic compare release
330   if (uce > ucx)
331     ucx = uce;
332 #pragma omp atomic compare release
333   if (uce < ucx)
334     ucx = uce;
335 
336 #pragma omp atomic compare release
337   ucx = ucx == uce ? ucd : ucx;
338 #pragma omp atomic compare release
339   ucx = uce == ucx ? ucd : ucx;
340 #pragma omp atomic compare release
341   if (ucx == uce)
342     ucx = ucd;
343 #pragma omp atomic compare release
344   if (uce == ucx)
345     ucx = ucd;
346 
347 #pragma omp atomic compare seq_cst
348   cx = cx > ce ? ce : cx;
349 #pragma omp atomic compare seq_cst
350   cx = cx < ce ? ce : cx;
351 #pragma omp atomic compare seq_cst
352   cx = ce > cx ? ce : cx;
353 #pragma omp atomic compare seq_cst
354   cx = ce < cx ? ce : cx;
355 #pragma omp atomic compare seq_cst
356   if (cx > ce)
357     cx = ce;
358 #pragma omp atomic compare seq_cst
359   if (cx < ce)
360     cx = ce;
361 #pragma omp atomic compare seq_cst
362   if (ce > cx)
363     cx = ce;
364 #pragma omp atomic compare seq_cst
365   if (ce < cx)
366     cx = ce;
367 
368 #pragma omp atomic compare seq_cst
369   cx = cx == ce ? cd : cx;
370 #pragma omp atomic compare seq_cst
371   cx = ce == cx ? cd : cx;
372 #pragma omp atomic compare seq_cst
373   if (cx == ce)
374     cx = cd;
375 #pragma omp atomic compare seq_cst
376   if (ce == cx)
377     cx = cd;
378 
379 #pragma omp atomic compare seq_cst
380   ucx = ucx > uce ? uce : ucx;
381 #pragma omp atomic compare seq_cst
382   ucx = ucx < uce ? uce : ucx;
383 #pragma omp atomic compare seq_cst
384   ucx = uce > ucx ? uce : ucx;
385 #pragma omp atomic compare seq_cst
386   ucx = uce < ucx ? uce : ucx;
387 #pragma omp atomic compare seq_cst
388   if (ucx > uce)
389     ucx = uce;
390 #pragma omp atomic compare seq_cst
391   if (ucx < uce)
392     ucx = uce;
393 #pragma omp atomic compare seq_cst
394   if (uce > ucx)
395     ucx = uce;
396 #pragma omp atomic compare seq_cst
397   if (uce < ucx)
398     ucx = uce;
399 
400 #pragma omp atomic compare seq_cst
401   ucx = ucx == uce ? ucd : ucx;
402 #pragma omp atomic compare seq_cst
403   ucx = uce == ucx ? ucd : ucx;
404 #pragma omp atomic compare seq_cst
405   if (ucx == uce)
406     ucx = ucd;
407 #pragma omp atomic compare seq_cst
408   if (uce == ucx)
409     ucx = ucd;
410 
411 #pragma omp atomic compare
412   sx = sx > se ? se : sx;
413 #pragma omp atomic compare
414   sx = sx < se ? se : sx;
415 #pragma omp atomic compare
416   sx = se > sx ? se : sx;
417 #pragma omp atomic compare
418   sx = se < sx ? se : sx;
419 #pragma omp atomic compare
420   if (sx > se)
421     sx = se;
422 #pragma omp atomic compare
423   if (sx < se)
424     sx = se;
425 #pragma omp atomic compare
426   if (se > sx)
427     sx = se;
428 #pragma omp atomic compare
429   if (se < sx)
430     sx = se;
431 
432 #pragma omp atomic compare
433   sx = sx == se ? sd : sx;
434 #pragma omp atomic compare
435   sx = se == sx ? sd : sx;
436 #pragma omp atomic compare
437   if (sx == se)
438     sx = sd;
439 #pragma omp atomic compare
440   if (se == sx)
441     sx = sd;
442 
443 #pragma omp atomic compare
444   usx = usx > use ? use : usx;
445 #pragma omp atomic compare
446   usx = usx < use ? use : usx;
447 #pragma omp atomic compare
448   usx = use > usx ? use : usx;
449 #pragma omp atomic compare
450   usx = use < usx ? use : usx;
451 #pragma omp atomic compare
452   if (usx > use)
453     usx = use;
454 #pragma omp atomic compare
455   if (usx < use)
456     usx = use;
457 #pragma omp atomic compare
458   if (use > usx)
459     usx = use;
460 #pragma omp atomic compare
461   if (use < usx)
462     usx = use;
463 
464 #pragma omp atomic compare
465   usx = usx == use ? usd : usx;
466 #pragma omp atomic compare
467   usx = use == usx ? usd : usx;
468 #pragma omp atomic compare
469   if (usx == use)
470     usx = usd;
471 #pragma omp atomic compare
472   if (use == usx)
473     usx = usd;
474 
475 #pragma omp atomic compare acq_rel
476   sx = sx > se ? se : sx;
477 #pragma omp atomic compare acq_rel
478   sx = sx < se ? se : sx;
479 #pragma omp atomic compare acq_rel
480   sx = se > sx ? se : sx;
481 #pragma omp atomic compare acq_rel
482   sx = se < sx ? se : sx;
483 #pragma omp atomic compare acq_rel
484   if (sx > se)
485     sx = se;
486 #pragma omp atomic compare acq_rel
487   if (sx < se)
488     sx = se;
489 #pragma omp atomic compare acq_rel
490   if (se > sx)
491     sx = se;
492 #pragma omp atomic compare acq_rel
493   if (se < sx)
494     sx = se;
495 
496 #pragma omp atomic compare acq_rel
497   sx = sx == se ? sd : sx;
498 #pragma omp atomic compare acq_rel
499   sx = se == sx ? sd : sx;
500 #pragma omp atomic compare acq_rel
501   if (sx == se)
502     sx = sd;
503 #pragma omp atomic compare acq_rel
504   if (se == sx)
505     sx = sd;
506 
507 #pragma omp atomic compare acq_rel
508   usx = usx > use ? use : usx;
509 #pragma omp atomic compare acq_rel
510   usx = usx < use ? use : usx;
511 #pragma omp atomic compare acq_rel
512   usx = use > usx ? use : usx;
513 #pragma omp atomic compare acq_rel
514   usx = use < usx ? use : usx;
515 #pragma omp atomic compare acq_rel
516   if (usx > use)
517     usx = use;
518 #pragma omp atomic compare acq_rel
519   if (usx < use)
520     usx = use;
521 #pragma omp atomic compare acq_rel
522   if (use > usx)
523     usx = use;
524 #pragma omp atomic compare acq_rel
525   if (use < usx)
526     usx = use;
527 
528 #pragma omp atomic compare acq_rel
529   usx = usx == use ? usd : usx;
530 #pragma omp atomic compare acq_rel
531   usx = use == usx ? usd : usx;
532 #pragma omp atomic compare acq_rel
533   if (usx == use)
534     usx = usd;
535 #pragma omp atomic compare acq_rel
536   if (use == usx)
537     usx = usd;
538 
539 #pragma omp atomic compare acquire
540   sx = sx > se ? se : sx;
541 #pragma omp atomic compare acquire
542   sx = sx < se ? se : sx;
543 #pragma omp atomic compare acquire
544   sx = se > sx ? se : sx;
545 #pragma omp atomic compare acquire
546   sx = se < sx ? se : sx;
547 #pragma omp atomic compare acquire
548   if (sx > se)
549     sx = se;
550 #pragma omp atomic compare acquire
551   if (sx < se)
552     sx = se;
553 #pragma omp atomic compare acquire
554   if (se > sx)
555     sx = se;
556 #pragma omp atomic compare acquire
557   if (se < sx)
558     sx = se;
559 
560 #pragma omp atomic compare acquire
561   sx = sx == se ? sd : sx;
562 #pragma omp atomic compare acquire
563   sx = se == sx ? sd : sx;
564 #pragma omp atomic compare acquire
565   if (sx == se)
566     sx = sd;
567 #pragma omp atomic compare acquire
568   if (se == sx)
569     sx = sd;
570 
571 #pragma omp atomic compare acquire
572   usx = usx > use ? use : usx;
573 #pragma omp atomic compare acquire
574   usx = usx < use ? use : usx;
575 #pragma omp atomic compare acquire
576   usx = use > usx ? use : usx;
577 #pragma omp atomic compare acquire
578   usx = use < usx ? use : usx;
579 #pragma omp atomic compare acquire
580   if (usx > use)
581     usx = use;
582 #pragma omp atomic compare acquire
583   if (usx < use)
584     usx = use;
585 #pragma omp atomic compare acquire
586   if (use > usx)
587     usx = use;
588 #pragma omp atomic compare acquire
589   if (use < usx)
590     usx = use;
591 
592 #pragma omp atomic compare acquire
593   usx = usx == use ? usd : usx;
594 #pragma omp atomic compare acquire
595   usx = use == usx ? usd : usx;
596 #pragma omp atomic compare acquire
597   if (usx == use)
598     usx = usd;
599 #pragma omp atomic compare acquire
600   if (use == usx)
601     usx = usd;
602 
603 #pragma omp atomic compare relaxed
604   sx = sx > se ? se : sx;
605 #pragma omp atomic compare relaxed
606   sx = sx < se ? se : sx;
607 #pragma omp atomic compare relaxed
608   sx = se > sx ? se : sx;
609 #pragma omp atomic compare relaxed
610   sx = se < sx ? se : sx;
611 #pragma omp atomic compare relaxed
612   if (sx > se)
613     sx = se;
614 #pragma omp atomic compare relaxed
615   if (sx < se)
616     sx = se;
617 #pragma omp atomic compare relaxed
618   if (se > sx)
619     sx = se;
620 #pragma omp atomic compare relaxed
621   if (se < sx)
622     sx = se;
623 
624 #pragma omp atomic compare relaxed
625   sx = sx == se ? sd : sx;
626 #pragma omp atomic compare relaxed
627   sx = se == sx ? sd : sx;
628 #pragma omp atomic compare relaxed
629   if (sx == se)
630     sx = sd;
631 #pragma omp atomic compare relaxed
632   if (se == sx)
633     sx = sd;
634 
635 #pragma omp atomic compare relaxed
636   usx = usx > use ? use : usx;
637 #pragma omp atomic compare relaxed
638   usx = usx < use ? use : usx;
639 #pragma omp atomic compare relaxed
640   usx = use > usx ? use : usx;
641 #pragma omp atomic compare relaxed
642   usx = use < usx ? use : usx;
643 #pragma omp atomic compare relaxed
644   if (usx > use)
645     usx = use;
646 #pragma omp atomic compare relaxed
647   if (usx < use)
648     usx = use;
649 #pragma omp atomic compare relaxed
650   if (use > usx)
651     usx = use;
652 #pragma omp atomic compare relaxed
653   if (use < usx)
654     usx = use;
655 
656 #pragma omp atomic compare relaxed
657   usx = usx == use ? usd : usx;
658 #pragma omp atomic compare relaxed
659   usx = use == usx ? usd : usx;
660 #pragma omp atomic compare relaxed
661   if (usx == use)
662     usx = usd;
663 #pragma omp atomic compare relaxed
664   if (use == usx)
665     usx = usd;
666 
667 #pragma omp atomic compare release
668   sx = sx > se ? se : sx;
669 #pragma omp atomic compare release
670   sx = sx < se ? se : sx;
671 #pragma omp atomic compare release
672   sx = se > sx ? se : sx;
673 #pragma omp atomic compare release
674   sx = se < sx ? se : sx;
675 #pragma omp atomic compare release
676   if (sx > se)
677     sx = se;
678 #pragma omp atomic compare release
679   if (sx < se)
680     sx = se;
681 #pragma omp atomic compare release
682   if (se > sx)
683     sx = se;
684 #pragma omp atomic compare release
685   if (se < sx)
686     sx = se;
687 
688 #pragma omp atomic compare release
689   sx = sx == se ? sd : sx;
690 #pragma omp atomic compare release
691   sx = se == sx ? sd : sx;
692 #pragma omp atomic compare release
693   if (sx == se)
694     sx = sd;
695 #pragma omp atomic compare release
696   if (se == sx)
697     sx = sd;
698 
699 #pragma omp atomic compare release
700   usx = usx > use ? use : usx;
701 #pragma omp atomic compare release
702   usx = usx < use ? use : usx;
703 #pragma omp atomic compare release
704   usx = use > usx ? use : usx;
705 #pragma omp atomic compare release
706   usx = use < usx ? use : usx;
707 #pragma omp atomic compare release
708   if (usx > use)
709     usx = use;
710 #pragma omp atomic compare release
711   if (usx < use)
712     usx = use;
713 #pragma omp atomic compare release
714   if (use > usx)
715     usx = use;
716 #pragma omp atomic compare release
717   if (use < usx)
718     usx = use;
719 
720 #pragma omp atomic compare release
721   usx = usx == use ? usd : usx;
722 #pragma omp atomic compare release
723   usx = use == usx ? usd : usx;
724 #pragma omp atomic compare release
725   if (usx == use)
726     usx = usd;
727 #pragma omp atomic compare release
728   if (use == usx)
729     usx = usd;
730 
731 #pragma omp atomic compare seq_cst
732   sx = sx > se ? se : sx;
733 #pragma omp atomic compare seq_cst
734   sx = sx < se ? se : sx;
735 #pragma omp atomic compare seq_cst
736   sx = se > sx ? se : sx;
737 #pragma omp atomic compare seq_cst
738   sx = se < sx ? se : sx;
739 #pragma omp atomic compare seq_cst
740   if (sx > se)
741     sx = se;
742 #pragma omp atomic compare seq_cst
743   if (sx < se)
744     sx = se;
745 #pragma omp atomic compare seq_cst
746   if (se > sx)
747     sx = se;
748 #pragma omp atomic compare seq_cst
749   if (se < sx)
750     sx = se;
751 
752 #pragma omp atomic compare seq_cst
753   sx = sx == se ? sd : sx;
754 #pragma omp atomic compare seq_cst
755   sx = se == sx ? sd : sx;
756 #pragma omp atomic compare seq_cst
757   if (sx == se)
758     sx = sd;
759 #pragma omp atomic compare seq_cst
760   if (se == sx)
761     sx = sd;
762 
763 #pragma omp atomic compare seq_cst
764   usx = usx > use ? use : usx;
765 #pragma omp atomic compare seq_cst
766   usx = usx < use ? use : usx;
767 #pragma omp atomic compare seq_cst
768   usx = use > usx ? use : usx;
769 #pragma omp atomic compare seq_cst
770   usx = use < usx ? use : usx;
771 #pragma omp atomic compare seq_cst
772   if (usx > use)
773     usx = use;
774 #pragma omp atomic compare seq_cst
775   if (usx < use)
776     usx = use;
777 #pragma omp atomic compare seq_cst
778   if (use > usx)
779     usx = use;
780 #pragma omp atomic compare seq_cst
781   if (use < usx)
782     usx = use;
783 
784 #pragma omp atomic compare seq_cst
785   usx = usx == use ? usd : usx;
786 #pragma omp atomic compare seq_cst
787   usx = use == usx ? usd : usx;
788 #pragma omp atomic compare seq_cst
789   if (usx == use)
790     usx = usd;
791 #pragma omp atomic compare seq_cst
792   if (use == usx)
793     usx = usd;
794 
795 #pragma omp atomic compare
796   ix = ix > ie ? ie : ix;
797 #pragma omp atomic compare
798   ix = ix < ie ? ie : ix;
799 #pragma omp atomic compare
800   ix = ie > ix ? ie : ix;
801 #pragma omp atomic compare
802   ix = ie < ix ? ie : ix;
803 #pragma omp atomic compare
804   if (ix > ie)
805     ix = ie;
806 #pragma omp atomic compare
807   if (ix < ie)
808     ix = ie;
809 #pragma omp atomic compare
810   if (ie > ix)
811     ix = ie;
812 #pragma omp atomic compare
813   if (ie < ix)
814     ix = ie;
815 
816 #pragma omp atomic compare
817   ix = ix == ie ? id : ix;
818 #pragma omp atomic compare
819   ix = ie == ix ? id : ix;
820 #pragma omp atomic compare
821   if (ix == ie)
822     ix = id;
823 #pragma omp atomic compare
824   if (ie == ix)
825     ix = id;
826 
827 #pragma omp atomic compare
828   uix = uix > uie ? uie : uix;
829 #pragma omp atomic compare
830   uix = uix < uie ? uie : uix;
831 #pragma omp atomic compare
832   uix = uie > uix ? uie : uix;
833 #pragma omp atomic compare
834   uix = uie < uix ? uie : uix;
835 #pragma omp atomic compare
836   if (uix > uie)
837     uix = uie;
838 #pragma omp atomic compare
839   if (uix < uie)
840     uix = uie;
841 #pragma omp atomic compare
842   if (uie > uix)
843     uix = uie;
844 #pragma omp atomic compare
845   if (uie < uix)
846     uix = uie;
847 
848 #pragma omp atomic compare
849   uix = uix == uie ? uid : uix;
850 #pragma omp atomic compare
851   uix = uie == uix ? uid : uix;
852 #pragma omp atomic compare
853   if (uix == uie)
854     uix = uid;
855 #pragma omp atomic compare
856   if (uie == uix)
857     uix = uid;
858 
859 #pragma omp atomic compare acq_rel
860   ix = ix > ie ? ie : ix;
861 #pragma omp atomic compare acq_rel
862   ix = ix < ie ? ie : ix;
863 #pragma omp atomic compare acq_rel
864   ix = ie > ix ? ie : ix;
865 #pragma omp atomic compare acq_rel
866   ix = ie < ix ? ie : ix;
867 #pragma omp atomic compare acq_rel
868   if (ix > ie)
869     ix = ie;
870 #pragma omp atomic compare acq_rel
871   if (ix < ie)
872     ix = ie;
873 #pragma omp atomic compare acq_rel
874   if (ie > ix)
875     ix = ie;
876 #pragma omp atomic compare acq_rel
877   if (ie < ix)
878     ix = ie;
879 
880 #pragma omp atomic compare acq_rel
881   ix = ix == ie ? id : ix;
882 #pragma omp atomic compare acq_rel
883   ix = ie == ix ? id : ix;
884 #pragma omp atomic compare acq_rel
885   if (ix == ie)
886     ix = id;
887 #pragma omp atomic compare acq_rel
888   if (ie == ix)
889     ix = id;
890 
891 #pragma omp atomic compare acq_rel
892   uix = uix > uie ? uie : uix;
893 #pragma omp atomic compare acq_rel
894   uix = uix < uie ? uie : uix;
895 #pragma omp atomic compare acq_rel
896   uix = uie > uix ? uie : uix;
897 #pragma omp atomic compare acq_rel
898   uix = uie < uix ? uie : uix;
899 #pragma omp atomic compare acq_rel
900   if (uix > uie)
901     uix = uie;
902 #pragma omp atomic compare acq_rel
903   if (uix < uie)
904     uix = uie;
905 #pragma omp atomic compare acq_rel
906   if (uie > uix)
907     uix = uie;
908 #pragma omp atomic compare acq_rel
909   if (uie < uix)
910     uix = uie;
911 
912 #pragma omp atomic compare acq_rel
913   uix = uix == uie ? uid : uix;
914 #pragma omp atomic compare acq_rel
915   uix = uie == uix ? uid : uix;
916 #pragma omp atomic compare acq_rel
917   if (uix == uie)
918     uix = uid;
919 #pragma omp atomic compare acq_rel
920   if (uie == uix)
921     uix = uid;
922 
923 #pragma omp atomic compare acquire
924   ix = ix > ie ? ie : ix;
925 #pragma omp atomic compare acquire
926   ix = ix < ie ? ie : ix;
927 #pragma omp atomic compare acquire
928   ix = ie > ix ? ie : ix;
929 #pragma omp atomic compare acquire
930   ix = ie < ix ? ie : ix;
931 #pragma omp atomic compare acquire
932   if (ix > ie)
933     ix = ie;
934 #pragma omp atomic compare acquire
935   if (ix < ie)
936     ix = ie;
937 #pragma omp atomic compare acquire
938   if (ie > ix)
939     ix = ie;
940 #pragma omp atomic compare acquire
941   if (ie < ix)
942     ix = ie;
943 
944 #pragma omp atomic compare acquire
945   ix = ix == ie ? id : ix;
946 #pragma omp atomic compare acquire
947   ix = ie == ix ? id : ix;
948 #pragma omp atomic compare acquire
949   if (ix == ie)
950     ix = id;
951 #pragma omp atomic compare acquire
952   if (ie == ix)
953     ix = id;
954 
955 #pragma omp atomic compare acquire
956   uix = uix > uie ? uie : uix;
957 #pragma omp atomic compare acquire
958   uix = uix < uie ? uie : uix;
959 #pragma omp atomic compare acquire
960   uix = uie > uix ? uie : uix;
961 #pragma omp atomic compare acquire
962   uix = uie < uix ? uie : uix;
963 #pragma omp atomic compare acquire
964   if (uix > uie)
965     uix = uie;
966 #pragma omp atomic compare acquire
967   if (uix < uie)
968     uix = uie;
969 #pragma omp atomic compare acquire
970   if (uie > uix)
971     uix = uie;
972 #pragma omp atomic compare acquire
973   if (uie < uix)
974     uix = uie;
975 
976 #pragma omp atomic compare acquire
977   uix = uix == uie ? uid : uix;
978 #pragma omp atomic compare acquire
979   uix = uie == uix ? uid : uix;
980 #pragma omp atomic compare acquire
981   if (uix == uie)
982     uix = uid;
983 #pragma omp atomic compare acquire
984   if (uie == uix)
985     uix = uid;
986 
987 #pragma omp atomic compare relaxed
988   ix = ix > ie ? ie : ix;
989 #pragma omp atomic compare relaxed
990   ix = ix < ie ? ie : ix;
991 #pragma omp atomic compare relaxed
992   ix = ie > ix ? ie : ix;
993 #pragma omp atomic compare relaxed
994   ix = ie < ix ? ie : ix;
995 #pragma omp atomic compare relaxed
996   if (ix > ie)
997     ix = ie;
998 #pragma omp atomic compare relaxed
999   if (ix < ie)
1000     ix = ie;
1001 #pragma omp atomic compare relaxed
1002   if (ie > ix)
1003     ix = ie;
1004 #pragma omp atomic compare relaxed
1005   if (ie < ix)
1006     ix = ie;
1007 
1008 #pragma omp atomic compare relaxed
1009   ix = ix == ie ? id : ix;
1010 #pragma omp atomic compare relaxed
1011   ix = ie == ix ? id : ix;
1012 #pragma omp atomic compare relaxed
1013   if (ix == ie)
1014     ix = id;
1015 #pragma omp atomic compare relaxed
1016   if (ie == ix)
1017     ix = id;
1018 
1019 #pragma omp atomic compare relaxed
1020   uix = uix > uie ? uie : uix;
1021 #pragma omp atomic compare relaxed
1022   uix = uix < uie ? uie : uix;
1023 #pragma omp atomic compare relaxed
1024   uix = uie > uix ? uie : uix;
1025 #pragma omp atomic compare relaxed
1026   uix = uie < uix ? uie : uix;
1027 #pragma omp atomic compare relaxed
1028   if (uix > uie)
1029     uix = uie;
1030 #pragma omp atomic compare relaxed
1031   if (uix < uie)
1032     uix = uie;
1033 #pragma omp atomic compare relaxed
1034   if (uie > uix)
1035     uix = uie;
1036 #pragma omp atomic compare relaxed
1037   if (uie < uix)
1038     uix = uie;
1039 
1040 #pragma omp atomic compare relaxed
1041   uix = uix == uie ? uid : uix;
1042 #pragma omp atomic compare relaxed
1043   uix = uie == uix ? uid : uix;
1044 #pragma omp atomic compare relaxed
1045   if (uix == uie)
1046     uix = uid;
1047 #pragma omp atomic compare relaxed
1048   if (uie == uix)
1049     uix = uid;
1050 
1051 #pragma omp atomic compare release
1052   ix = ix > ie ? ie : ix;
1053 #pragma omp atomic compare release
1054   ix = ix < ie ? ie : ix;
1055 #pragma omp atomic compare release
1056   ix = ie > ix ? ie : ix;
1057 #pragma omp atomic compare release
1058   ix = ie < ix ? ie : ix;
1059 #pragma omp atomic compare release
1060   if (ix > ie)
1061     ix = ie;
1062 #pragma omp atomic compare release
1063   if (ix < ie)
1064     ix = ie;
1065 #pragma omp atomic compare release
1066   if (ie > ix)
1067     ix = ie;
1068 #pragma omp atomic compare release
1069   if (ie < ix)
1070     ix = ie;
1071 
1072 #pragma omp atomic compare release
1073   ix = ix == ie ? id : ix;
1074 #pragma omp atomic compare release
1075   ix = ie == ix ? id : ix;
1076 #pragma omp atomic compare release
1077   if (ix == ie)
1078     ix = id;
1079 #pragma omp atomic compare release
1080   if (ie == ix)
1081     ix = id;
1082 
1083 #pragma omp atomic compare release
1084   uix = uix > uie ? uie : uix;
1085 #pragma omp atomic compare release
1086   uix = uix < uie ? uie : uix;
1087 #pragma omp atomic compare release
1088   uix = uie > uix ? uie : uix;
1089 #pragma omp atomic compare release
1090   uix = uie < uix ? uie : uix;
1091 #pragma omp atomic compare release
1092   if (uix > uie)
1093     uix = uie;
1094 #pragma omp atomic compare release
1095   if (uix < uie)
1096     uix = uie;
1097 #pragma omp atomic compare release
1098   if (uie > uix)
1099     uix = uie;
1100 #pragma omp atomic compare release
1101   if (uie < uix)
1102     uix = uie;
1103 
1104 #pragma omp atomic compare release
1105   uix = uix == uie ? uid : uix;
1106 #pragma omp atomic compare release
1107   uix = uie == uix ? uid : uix;
1108 #pragma omp atomic compare release
1109   if (uix == uie)
1110     uix = uid;
1111 #pragma omp atomic compare release
1112   if (uie == uix)
1113     uix = uid;
1114 
1115 #pragma omp atomic compare seq_cst
1116   ix = ix > ie ? ie : ix;
1117 #pragma omp atomic compare seq_cst
1118   ix = ix < ie ? ie : ix;
1119 #pragma omp atomic compare seq_cst
1120   ix = ie > ix ? ie : ix;
1121 #pragma omp atomic compare seq_cst
1122   ix = ie < ix ? ie : ix;
1123 #pragma omp atomic compare seq_cst
1124   if (ix > ie)
1125     ix = ie;
1126 #pragma omp atomic compare seq_cst
1127   if (ix < ie)
1128     ix = ie;
1129 #pragma omp atomic compare seq_cst
1130   if (ie > ix)
1131     ix = ie;
1132 #pragma omp atomic compare seq_cst
1133   if (ie < ix)
1134     ix = ie;
1135 
1136 #pragma omp atomic compare seq_cst
1137   ix = ix == ie ? id : ix;
1138 #pragma omp atomic compare seq_cst
1139   ix = ie == ix ? id : ix;
1140 #pragma omp atomic compare seq_cst
1141   if (ix == ie)
1142     ix = id;
1143 #pragma omp atomic compare seq_cst
1144   if (ie == ix)
1145     ix = id;
1146 
1147 #pragma omp atomic compare seq_cst
1148   uix = uix > uie ? uie : uix;
1149 #pragma omp atomic compare seq_cst
1150   uix = uix < uie ? uie : uix;
1151 #pragma omp atomic compare seq_cst
1152   uix = uie > uix ? uie : uix;
1153 #pragma omp atomic compare seq_cst
1154   uix = uie < uix ? uie : uix;
1155 #pragma omp atomic compare seq_cst
1156   if (uix > uie)
1157     uix = uie;
1158 #pragma omp atomic compare seq_cst
1159   if (uix < uie)
1160     uix = uie;
1161 #pragma omp atomic compare seq_cst
1162   if (uie > uix)
1163     uix = uie;
1164 #pragma omp atomic compare seq_cst
1165   if (uie < uix)
1166     uix = uie;
1167 
1168 #pragma omp atomic compare seq_cst
1169   uix = uix == uie ? uid : uix;
1170 #pragma omp atomic compare seq_cst
1171   uix = uie == uix ? uid : uix;
1172 #pragma omp atomic compare seq_cst
1173   if (uix == uie)
1174     uix = uid;
1175 #pragma omp atomic compare seq_cst
1176   if (uie == uix)
1177     uix = uid;
1178 
1179 #pragma omp atomic compare
1180   lx = lx > le ? le : lx;
1181 #pragma omp atomic compare
1182   lx = lx < le ? le : lx;
1183 #pragma omp atomic compare
1184   lx = le > lx ? le : lx;
1185 #pragma omp atomic compare
1186   lx = le < lx ? le : lx;
1187 #pragma omp atomic compare
1188   if (lx > le)
1189     lx = le;
1190 #pragma omp atomic compare
1191   if (lx < le)
1192     lx = le;
1193 #pragma omp atomic compare
1194   if (le > lx)
1195     lx = le;
1196 #pragma omp atomic compare
1197   if (le < lx)
1198     lx = le;
1199 
1200 #pragma omp atomic compare
1201   lx = lx == le ? ld : lx;
1202 #pragma omp atomic compare
1203   lx = le == lx ? ld : lx;
1204 #pragma omp atomic compare
1205   if (lx == le)
1206     lx = ld;
1207 #pragma omp atomic compare
1208   if (le == lx)
1209     lx = ld;
1210 
1211 #pragma omp atomic compare
1212   ulx = ulx > ule ? ule : ulx;
1213 #pragma omp atomic compare
1214   ulx = ulx < ule ? ule : ulx;
1215 #pragma omp atomic compare
1216   ulx = ule > ulx ? ule : ulx;
1217 #pragma omp atomic compare
1218   ulx = ule < ulx ? ule : ulx;
1219 #pragma omp atomic compare
1220   if (ulx > ule)
1221     ulx = ule;
1222 #pragma omp atomic compare
1223   if (ulx < ule)
1224     ulx = ule;
1225 #pragma omp atomic compare
1226   if (ule > ulx)
1227     ulx = ule;
1228 #pragma omp atomic compare
1229   if (ule < ulx)
1230     ulx = ule;
1231 
1232 #pragma omp atomic compare
1233   ulx = ulx == ule ? uld : ulx;
1234 #pragma omp atomic compare
1235   ulx = ule == ulx ? uld : ulx;
1236 #pragma omp atomic compare
1237   if (ulx == ule)
1238     ulx = uld;
1239 #pragma omp atomic compare
1240   if (ule == ulx)
1241     ulx = uld;
1242 
1243 #pragma omp atomic compare acq_rel
1244   lx = lx > le ? le : lx;
1245 #pragma omp atomic compare acq_rel
1246   lx = lx < le ? le : lx;
1247 #pragma omp atomic compare acq_rel
1248   lx = le > lx ? le : lx;
1249 #pragma omp atomic compare acq_rel
1250   lx = le < lx ? le : lx;
1251 #pragma omp atomic compare acq_rel
1252   if (lx > le)
1253     lx = le;
1254 #pragma omp atomic compare acq_rel
1255   if (lx < le)
1256     lx = le;
1257 #pragma omp atomic compare acq_rel
1258   if (le > lx)
1259     lx = le;
1260 #pragma omp atomic compare acq_rel
1261   if (le < lx)
1262     lx = le;
1263 
1264 #pragma omp atomic compare acq_rel
1265   lx = lx == le ? ld : lx;
1266 #pragma omp atomic compare acq_rel
1267   lx = le == lx ? ld : lx;
1268 #pragma omp atomic compare acq_rel
1269   if (lx == le)
1270     lx = ld;
1271 #pragma omp atomic compare acq_rel
1272   if (le == lx)
1273     lx = ld;
1274 
1275 #pragma omp atomic compare acq_rel
1276   ulx = ulx > ule ? ule : ulx;
1277 #pragma omp atomic compare acq_rel
1278   ulx = ulx < ule ? ule : ulx;
1279 #pragma omp atomic compare acq_rel
1280   ulx = ule > ulx ? ule : ulx;
1281 #pragma omp atomic compare acq_rel
1282   ulx = ule < ulx ? ule : ulx;
1283 #pragma omp atomic compare acq_rel
1284   if (ulx > ule)
1285     ulx = ule;
1286 #pragma omp atomic compare acq_rel
1287   if (ulx < ule)
1288     ulx = ule;
1289 #pragma omp atomic compare acq_rel
1290   if (ule > ulx)
1291     ulx = ule;
1292 #pragma omp atomic compare acq_rel
1293   if (ule < ulx)
1294     ulx = ule;
1295 
1296 #pragma omp atomic compare acq_rel
1297   ulx = ulx == ule ? uld : ulx;
1298 #pragma omp atomic compare acq_rel
1299   ulx = ule == ulx ? uld : ulx;
1300 #pragma omp atomic compare acq_rel
1301   if (ulx == ule)
1302     ulx = uld;
1303 #pragma omp atomic compare acq_rel
1304   if (ule == ulx)
1305     ulx = uld;
1306 
1307 #pragma omp atomic compare acquire
1308   lx = lx > le ? le : lx;
1309 #pragma omp atomic compare acquire
1310   lx = lx < le ? le : lx;
1311 #pragma omp atomic compare acquire
1312   lx = le > lx ? le : lx;
1313 #pragma omp atomic compare acquire
1314   lx = le < lx ? le : lx;
1315 #pragma omp atomic compare acquire
1316   if (lx > le)
1317     lx = le;
1318 #pragma omp atomic compare acquire
1319   if (lx < le)
1320     lx = le;
1321 #pragma omp atomic compare acquire
1322   if (le > lx)
1323     lx = le;
1324 #pragma omp atomic compare acquire
1325   if (le < lx)
1326     lx = le;
1327 
1328 #pragma omp atomic compare acquire
1329   lx = lx == le ? ld : lx;
1330 #pragma omp atomic compare acquire
1331   lx = le == lx ? ld : lx;
1332 #pragma omp atomic compare acquire
1333   if (lx == le)
1334     lx = ld;
1335 #pragma omp atomic compare acquire
1336   if (le == lx)
1337     lx = ld;
1338 
1339 #pragma omp atomic compare acquire
1340   ulx = ulx > ule ? ule : ulx;
1341 #pragma omp atomic compare acquire
1342   ulx = ulx < ule ? ule : ulx;
1343 #pragma omp atomic compare acquire
1344   ulx = ule > ulx ? ule : ulx;
1345 #pragma omp atomic compare acquire
1346   ulx = ule < ulx ? ule : ulx;
1347 #pragma omp atomic compare acquire
1348   if (ulx > ule)
1349     ulx = ule;
1350 #pragma omp atomic compare acquire
1351   if (ulx < ule)
1352     ulx = ule;
1353 #pragma omp atomic compare acquire
1354   if (ule > ulx)
1355     ulx = ule;
1356 #pragma omp atomic compare acquire
1357   if (ule < ulx)
1358     ulx = ule;
1359 
1360 #pragma omp atomic compare acquire
1361   ulx = ulx == ule ? uld : ulx;
1362 #pragma omp atomic compare acquire
1363   ulx = ule == ulx ? uld : ulx;
1364 #pragma omp atomic compare acquire
1365   if (ulx == ule)
1366     ulx = uld;
1367 #pragma omp atomic compare acquire
1368   if (ule == ulx)
1369     ulx = uld;
1370 
1371 #pragma omp atomic compare relaxed
1372   lx = lx > le ? le : lx;
1373 #pragma omp atomic compare relaxed
1374   lx = lx < le ? le : lx;
1375 #pragma omp atomic compare relaxed
1376   lx = le > lx ? le : lx;
1377 #pragma omp atomic compare relaxed
1378   lx = le < lx ? le : lx;
1379 #pragma omp atomic compare relaxed
1380   if (lx > le)
1381     lx = le;
1382 #pragma omp atomic compare relaxed
1383   if (lx < le)
1384     lx = le;
1385 #pragma omp atomic compare relaxed
1386   if (le > lx)
1387     lx = le;
1388 #pragma omp atomic compare relaxed
1389   if (le < lx)
1390     lx = le;
1391 
1392 #pragma omp atomic compare relaxed
1393   lx = lx == le ? ld : lx;
1394 #pragma omp atomic compare relaxed
1395   lx = le == lx ? ld : lx;
1396 #pragma omp atomic compare relaxed
1397   if (lx == le)
1398     lx = ld;
1399 #pragma omp atomic compare relaxed
1400   if (le == lx)
1401     lx = ld;
1402 
1403 #pragma omp atomic compare relaxed
1404   ulx = ulx > ule ? ule : ulx;
1405 #pragma omp atomic compare relaxed
1406   ulx = ulx < ule ? ule : ulx;
1407 #pragma omp atomic compare relaxed
1408   ulx = ule > ulx ? ule : ulx;
1409 #pragma omp atomic compare relaxed
1410   ulx = ule < ulx ? ule : ulx;
1411 #pragma omp atomic compare relaxed
1412   if (ulx > ule)
1413     ulx = ule;
1414 #pragma omp atomic compare relaxed
1415   if (ulx < ule)
1416     ulx = ule;
1417 #pragma omp atomic compare relaxed
1418   if (ule > ulx)
1419     ulx = ule;
1420 #pragma omp atomic compare relaxed
1421   if (ule < ulx)
1422     ulx = ule;
1423 
1424 #pragma omp atomic compare relaxed
1425   ulx = ulx == ule ? uld : ulx;
1426 #pragma omp atomic compare relaxed
1427   ulx = ule == ulx ? uld : ulx;
1428 #pragma omp atomic compare relaxed
1429   if (ulx == ule)
1430     ulx = uld;
1431 #pragma omp atomic compare relaxed
1432   if (ule == ulx)
1433     ulx = uld;
1434 
1435 #pragma omp atomic compare release
1436   lx = lx > le ? le : lx;
1437 #pragma omp atomic compare release
1438   lx = lx < le ? le : lx;
1439 #pragma omp atomic compare release
1440   lx = le > lx ? le : lx;
1441 #pragma omp atomic compare release
1442   lx = le < lx ? le : lx;
1443 #pragma omp atomic compare release
1444   if (lx > le)
1445     lx = le;
1446 #pragma omp atomic compare release
1447   if (lx < le)
1448     lx = le;
1449 #pragma omp atomic compare release
1450   if (le > lx)
1451     lx = le;
1452 #pragma omp atomic compare release
1453   if (le < lx)
1454     lx = le;
1455 
1456 #pragma omp atomic compare release
1457   lx = lx == le ? ld : lx;
1458 #pragma omp atomic compare release
1459   lx = le == lx ? ld : lx;
1460 #pragma omp atomic compare release
1461   if (lx == le)
1462     lx = ld;
1463 #pragma omp atomic compare release
1464   if (le == lx)
1465     lx = ld;
1466 
1467 #pragma omp atomic compare release
1468   ulx = ulx > ule ? ule : ulx;
1469 #pragma omp atomic compare release
1470   ulx = ulx < ule ? ule : ulx;
1471 #pragma omp atomic compare release
1472   ulx = ule > ulx ? ule : ulx;
1473 #pragma omp atomic compare release
1474   ulx = ule < ulx ? ule : ulx;
1475 #pragma omp atomic compare release
1476   if (ulx > ule)
1477     ulx = ule;
1478 #pragma omp atomic compare release
1479   if (ulx < ule)
1480     ulx = ule;
1481 #pragma omp atomic compare release
1482   if (ule > ulx)
1483     ulx = ule;
1484 #pragma omp atomic compare release
1485   if (ule < ulx)
1486     ulx = ule;
1487 
1488 #pragma omp atomic compare release
1489   ulx = ulx == ule ? uld : ulx;
1490 #pragma omp atomic compare release
1491   ulx = ule == ulx ? uld : ulx;
1492 #pragma omp atomic compare release
1493   if (ulx == ule)
1494     ulx = uld;
1495 #pragma omp atomic compare release
1496   if (ule == ulx)
1497     ulx = uld;
1498 
1499 #pragma omp atomic compare seq_cst
1500   lx = lx > le ? le : lx;
1501 #pragma omp atomic compare seq_cst
1502   lx = lx < le ? le : lx;
1503 #pragma omp atomic compare seq_cst
1504   lx = le > lx ? le : lx;
1505 #pragma omp atomic compare seq_cst
1506   lx = le < lx ? le : lx;
1507 #pragma omp atomic compare seq_cst
1508   if (lx > le)
1509     lx = le;
1510 #pragma omp atomic compare seq_cst
1511   if (lx < le)
1512     lx = le;
1513 #pragma omp atomic compare seq_cst
1514   if (le > lx)
1515     lx = le;
1516 #pragma omp atomic compare seq_cst
1517   if (le < lx)
1518     lx = le;
1519 
1520 #pragma omp atomic compare seq_cst
1521   lx = lx == le ? ld : lx;
1522 #pragma omp atomic compare seq_cst
1523   lx = le == lx ? ld : lx;
1524 #pragma omp atomic compare seq_cst
1525   if (lx == le)
1526     lx = ld;
1527 #pragma omp atomic compare seq_cst
1528   if (le == lx)
1529     lx = ld;
1530 
1531 #pragma omp atomic compare seq_cst
1532   ulx = ulx > ule ? ule : ulx;
1533 #pragma omp atomic compare seq_cst
1534   ulx = ulx < ule ? ule : ulx;
1535 #pragma omp atomic compare seq_cst
1536   ulx = ule > ulx ? ule : ulx;
1537 #pragma omp atomic compare seq_cst
1538   ulx = ule < ulx ? ule : ulx;
1539 #pragma omp atomic compare seq_cst
1540   if (ulx > ule)
1541     ulx = ule;
1542 #pragma omp atomic compare seq_cst
1543   if (ulx < ule)
1544     ulx = ule;
1545 #pragma omp atomic compare seq_cst
1546   if (ule > ulx)
1547     ulx = ule;
1548 #pragma omp atomic compare seq_cst
1549   if (ule < ulx)
1550     ulx = ule;
1551 
1552 #pragma omp atomic compare seq_cst
1553   ulx = ulx == ule ? uld : ulx;
1554 #pragma omp atomic compare seq_cst
1555   ulx = ule == ulx ? uld : ulx;
1556 #pragma omp atomic compare seq_cst
1557   if (ulx == ule)
1558     ulx = uld;
1559 #pragma omp atomic compare seq_cst
1560   if (ule == ulx)
1561     ulx = uld;
1562 
1563 #pragma omp atomic compare
1564   llx = llx > lle ? lle : llx;
1565 #pragma omp atomic compare
1566   llx = llx < lle ? lle : llx;
1567 #pragma omp atomic compare
1568   llx = lle > llx ? lle : llx;
1569 #pragma omp atomic compare
1570   llx = lle < llx ? lle : llx;
1571 #pragma omp atomic compare
1572   if (llx > lle)
1573     llx = lle;
1574 #pragma omp atomic compare
1575   if (llx < lle)
1576     llx = lle;
1577 #pragma omp atomic compare
1578   if (lle > llx)
1579     llx = lle;
1580 #pragma omp atomic compare
1581   if (lle < llx)
1582     llx = lle;
1583 
1584 #pragma omp atomic compare
1585   llx = llx == lle ? lld : llx;
1586 #pragma omp atomic compare
1587   llx = lle == llx ? lld : llx;
1588 #pragma omp atomic compare
1589   if (llx == lle)
1590     llx = lld;
1591 #pragma omp atomic compare
1592   if (lle == llx)
1593     llx = lld;
1594 
1595 #pragma omp atomic compare
1596   ullx = ullx > ulle ? ulle : ullx;
1597 #pragma omp atomic compare
1598   ullx = ullx < ulle ? ulle : ullx;
1599 #pragma omp atomic compare
1600   ullx = ulle > ullx ? ulle : ullx;
1601 #pragma omp atomic compare
1602   ullx = ulle < ullx ? ulle : ullx;
1603 #pragma omp atomic compare
1604   if (ullx > ulle)
1605     ullx = ulle;
1606 #pragma omp atomic compare
1607   if (ullx < ulle)
1608     ullx = ulle;
1609 #pragma omp atomic compare
1610   if (ulle > ullx)
1611     ullx = ulle;
1612 #pragma omp atomic compare
1613   if (ulle < ullx)
1614     ullx = ulle;
1615 
1616 #pragma omp atomic compare
1617   ullx = ullx == ulle ? ulld : ullx;
1618 #pragma omp atomic compare
1619   ullx = ulle == ullx ? ulld : ullx;
1620 #pragma omp atomic compare
1621   if (ullx == ulle)
1622     ullx = ulld;
1623 #pragma omp atomic compare
1624   if (ulle == ullx)
1625     ullx = ulld;
1626 
1627 #pragma omp atomic compare acq_rel
1628   llx = llx > lle ? lle : llx;
1629 #pragma omp atomic compare acq_rel
1630   llx = llx < lle ? lle : llx;
1631 #pragma omp atomic compare acq_rel
1632   llx = lle > llx ? lle : llx;
1633 #pragma omp atomic compare acq_rel
1634   llx = lle < llx ? lle : llx;
1635 #pragma omp atomic compare acq_rel
1636   if (llx > lle)
1637     llx = lle;
1638 #pragma omp atomic compare acq_rel
1639   if (llx < lle)
1640     llx = lle;
1641 #pragma omp atomic compare acq_rel
1642   if (lle > llx)
1643     llx = lle;
1644 #pragma omp atomic compare acq_rel
1645   if (lle < llx)
1646     llx = lle;
1647 
1648 #pragma omp atomic compare acq_rel
1649   llx = llx == lle ? lld : llx;
1650 #pragma omp atomic compare acq_rel
1651   llx = lle == llx ? lld : llx;
1652 #pragma omp atomic compare acq_rel
1653   if (llx == lle)
1654     llx = lld;
1655 #pragma omp atomic compare acq_rel
1656   if (lle == llx)
1657     llx = lld;
1658 
1659 #pragma omp atomic compare acq_rel
1660   ullx = ullx > ulle ? ulle : ullx;
1661 #pragma omp atomic compare acq_rel
1662   ullx = ullx < ulle ? ulle : ullx;
1663 #pragma omp atomic compare acq_rel
1664   ullx = ulle > ullx ? ulle : ullx;
1665 #pragma omp atomic compare acq_rel
1666   ullx = ulle < ullx ? ulle : ullx;
1667 #pragma omp atomic compare acq_rel
1668   if (ullx > ulle)
1669     ullx = ulle;
1670 #pragma omp atomic compare acq_rel
1671   if (ullx < ulle)
1672     ullx = ulle;
1673 #pragma omp atomic compare acq_rel
1674   if (ulle > ullx)
1675     ullx = ulle;
1676 #pragma omp atomic compare acq_rel
1677   if (ulle < ullx)
1678     ullx = ulle;
1679 
1680 #pragma omp atomic compare acq_rel
1681   ullx = ullx == ulle ? ulld : ullx;
1682 #pragma omp atomic compare acq_rel
1683   ullx = ulle == ullx ? ulld : ullx;
1684 #pragma omp atomic compare acq_rel
1685   if (ullx == ulle)
1686     ullx = ulld;
1687 #pragma omp atomic compare acq_rel
1688   if (ulle == ullx)
1689     ullx = ulld;
1690 
1691 #pragma omp atomic compare acquire
1692   llx = llx > lle ? lle : llx;
1693 #pragma omp atomic compare acquire
1694   llx = llx < lle ? lle : llx;
1695 #pragma omp atomic compare acquire
1696   llx = lle > llx ? lle : llx;
1697 #pragma omp atomic compare acquire
1698   llx = lle < llx ? lle : llx;
1699 #pragma omp atomic compare acquire
1700   if (llx > lle)
1701     llx = lle;
1702 #pragma omp atomic compare acquire
1703   if (llx < lle)
1704     llx = lle;
1705 #pragma omp atomic compare acquire
1706   if (lle > llx)
1707     llx = lle;
1708 #pragma omp atomic compare acquire
1709   if (lle < llx)
1710     llx = lle;
1711 
1712 #pragma omp atomic compare acquire
1713   llx = llx == lle ? lld : llx;
1714 #pragma omp atomic compare acquire
1715   llx = lle == llx ? lld : llx;
1716 #pragma omp atomic compare acquire
1717   if (llx == lle)
1718     llx = lld;
1719 #pragma omp atomic compare acquire
1720   if (lle == llx)
1721     llx = lld;
1722 
1723 #pragma omp atomic compare acquire
1724   ullx = ullx > ulle ? ulle : ullx;
1725 #pragma omp atomic compare acquire
1726   ullx = ullx < ulle ? ulle : ullx;
1727 #pragma omp atomic compare acquire
1728   ullx = ulle > ullx ? ulle : ullx;
1729 #pragma omp atomic compare acquire
1730   ullx = ulle < ullx ? ulle : ullx;
1731 #pragma omp atomic compare acquire
1732   if (ullx > ulle)
1733     ullx = ulle;
1734 #pragma omp atomic compare acquire
1735   if (ullx < ulle)
1736     ullx = ulle;
1737 #pragma omp atomic compare acquire
1738   if (ulle > ullx)
1739     ullx = ulle;
1740 #pragma omp atomic compare acquire
1741   if (ulle < ullx)
1742     ullx = ulle;
1743 
1744 #pragma omp atomic compare acquire
1745   ullx = ullx == ulle ? ulld : ullx;
1746 #pragma omp atomic compare acquire
1747   ullx = ulle == ullx ? ulld : ullx;
1748 #pragma omp atomic compare acquire
1749   if (ullx == ulle)
1750     ullx = ulld;
1751 #pragma omp atomic compare acquire
1752   if (ulle == ullx)
1753     ullx = ulld;
1754 
1755 #pragma omp atomic compare relaxed
1756   llx = llx > lle ? lle : llx;
1757 #pragma omp atomic compare relaxed
1758   llx = llx < lle ? lle : llx;
1759 #pragma omp atomic compare relaxed
1760   llx = lle > llx ? lle : llx;
1761 #pragma omp atomic compare relaxed
1762   llx = lle < llx ? lle : llx;
1763 #pragma omp atomic compare relaxed
1764   if (llx > lle)
1765     llx = lle;
1766 #pragma omp atomic compare relaxed
1767   if (llx < lle)
1768     llx = lle;
1769 #pragma omp atomic compare relaxed
1770   if (lle > llx)
1771     llx = lle;
1772 #pragma omp atomic compare relaxed
1773   if (lle < llx)
1774     llx = lle;
1775 
1776 #pragma omp atomic compare relaxed
1777   llx = llx == lle ? lld : llx;
1778 #pragma omp atomic compare relaxed
1779   llx = lle == llx ? lld : llx;
1780 #pragma omp atomic compare relaxed
1781   if (llx == lle)
1782     llx = lld;
1783 #pragma omp atomic compare relaxed
1784   if (lle == llx)
1785     llx = lld;
1786 
1787 #pragma omp atomic compare relaxed
1788   ullx = ullx > ulle ? ulle : ullx;
1789 #pragma omp atomic compare relaxed
1790   ullx = ullx < ulle ? ulle : ullx;
1791 #pragma omp atomic compare relaxed
1792   ullx = ulle > ullx ? ulle : ullx;
1793 #pragma omp atomic compare relaxed
1794   ullx = ulle < ullx ? ulle : ullx;
1795 #pragma omp atomic compare relaxed
1796   if (ullx > ulle)
1797     ullx = ulle;
1798 #pragma omp atomic compare relaxed
1799   if (ullx < ulle)
1800     ullx = ulle;
1801 #pragma omp atomic compare relaxed
1802   if (ulle > ullx)
1803     ullx = ulle;
1804 #pragma omp atomic compare relaxed
1805   if (ulle < ullx)
1806     ullx = ulle;
1807 
1808 #pragma omp atomic compare relaxed
1809   ullx = ullx == ulle ? ulld : ullx;
1810 #pragma omp atomic compare relaxed
1811   ullx = ulle == ullx ? ulld : ullx;
1812 #pragma omp atomic compare relaxed
1813   if (ullx == ulle)
1814     ullx = ulld;
1815 #pragma omp atomic compare relaxed
1816   if (ulle == ullx)
1817     ullx = ulld;
1818 
1819 #pragma omp atomic compare release
1820   llx = llx > lle ? lle : llx;
1821 #pragma omp atomic compare release
1822   llx = llx < lle ? lle : llx;
1823 #pragma omp atomic compare release
1824   llx = lle > llx ? lle : llx;
1825 #pragma omp atomic compare release
1826   llx = lle < llx ? lle : llx;
1827 #pragma omp atomic compare release
1828   if (llx > lle)
1829     llx = lle;
1830 #pragma omp atomic compare release
1831   if (llx < lle)
1832     llx = lle;
1833 #pragma omp atomic compare release
1834   if (lle > llx)
1835     llx = lle;
1836 #pragma omp atomic compare release
1837   if (lle < llx)
1838     llx = lle;
1839 
1840 #pragma omp atomic compare release
1841   llx = llx == lle ? lld : llx;
1842 #pragma omp atomic compare release
1843   llx = lle == llx ? lld : llx;
1844 #pragma omp atomic compare release
1845   if (llx == lle)
1846     llx = lld;
1847 #pragma omp atomic compare release
1848   if (lle == llx)
1849     llx = lld;
1850 
1851 #pragma omp atomic compare release
1852   ullx = ullx > ulle ? ulle : ullx;
1853 #pragma omp atomic compare release
1854   ullx = ullx < ulle ? ulle : ullx;
1855 #pragma omp atomic compare release
1856   ullx = ulle > ullx ? ulle : ullx;
1857 #pragma omp atomic compare release
1858   ullx = ulle < ullx ? ulle : ullx;
1859 #pragma omp atomic compare release
1860   if (ullx > ulle)
1861     ullx = ulle;
1862 #pragma omp atomic compare release
1863   if (ullx < ulle)
1864     ullx = ulle;
1865 #pragma omp atomic compare release
1866   if (ulle > ullx)
1867     ullx = ulle;
1868 #pragma omp atomic compare release
1869   if (ulle < ullx)
1870     ullx = ulle;
1871 
1872 #pragma omp atomic compare release
1873   ullx = ullx == ulle ? ulld : ullx;
1874 #pragma omp atomic compare release
1875   ullx = ulle == ullx ? ulld : ullx;
1876 #pragma omp atomic compare release
1877   if (ullx == ulle)
1878     ullx = ulld;
1879 #pragma omp atomic compare release
1880   if (ulle == ullx)
1881     ullx = ulld;
1882 
1883 #pragma omp atomic compare seq_cst
1884   llx = llx > lle ? lle : llx;
1885 #pragma omp atomic compare seq_cst
1886   llx = llx < lle ? lle : llx;
1887 #pragma omp atomic compare seq_cst
1888   llx = lle > llx ? lle : llx;
1889 #pragma omp atomic compare seq_cst
1890   llx = lle < llx ? lle : llx;
1891 #pragma omp atomic compare seq_cst
1892   if (llx > lle)
1893     llx = lle;
1894 #pragma omp atomic compare seq_cst
1895   if (llx < lle)
1896     llx = lle;
1897 #pragma omp atomic compare seq_cst
1898   if (lle > llx)
1899     llx = lle;
1900 #pragma omp atomic compare seq_cst
1901   if (lle < llx)
1902     llx = lle;
1903 
1904 #pragma omp atomic compare seq_cst
1905   llx = llx == lle ? lld : llx;
1906 #pragma omp atomic compare seq_cst
1907   llx = lle == llx ? lld : llx;
1908 #pragma omp atomic compare seq_cst
1909   if (llx == lle)
1910     llx = lld;
1911 #pragma omp atomic compare seq_cst
1912   if (lle == llx)
1913     llx = lld;
1914 
1915 #pragma omp atomic compare seq_cst
1916   ullx = ullx > ulle ? ulle : ullx;
1917 #pragma omp atomic compare seq_cst
1918   ullx = ullx < ulle ? ulle : ullx;
1919 #pragma omp atomic compare seq_cst
1920   ullx = ulle > ullx ? ulle : ullx;
1921 #pragma omp atomic compare seq_cst
1922   ullx = ulle < ullx ? ulle : ullx;
1923 #pragma omp atomic compare seq_cst
1924   if (ullx > ulle)
1925     ullx = ulle;
1926 #pragma omp atomic compare seq_cst
1927   if (ullx < ulle)
1928     ullx = ulle;
1929 #pragma omp atomic compare seq_cst
1930   if (ulle > ullx)
1931     ullx = ulle;
1932 #pragma omp atomic compare seq_cst
1933   if (ulle < ullx)
1934     ullx = ulle;
1935 
1936 #pragma omp atomic compare seq_cst
1937   ullx = ullx == ulle ? ulld : ullx;
1938 #pragma omp atomic compare seq_cst
1939   ullx = ulle == ullx ? ulld : ullx;
1940 #pragma omp atomic compare seq_cst
1941   if (ullx == ulle)
1942     ullx = ulld;
1943 #pragma omp atomic compare seq_cst
1944   if (ulle == ullx)
1945     ullx = ulld;
1946 }
1947 
1948 #endif
1949 // CHECK-LABEL: @foo(
1950 // CHECK-NEXT:  entry:
1951 // CHECK-NEXT:    [[CX:%.*]] = alloca i8, align 1
1952 // CHECK-NEXT:    [[CE:%.*]] = alloca i8, align 1
1953 // CHECK-NEXT:    [[CD:%.*]] = alloca i8, align 1
1954 // CHECK-NEXT:    [[UCX:%.*]] = alloca i8, align 1
1955 // CHECK-NEXT:    [[UCE:%.*]] = alloca i8, align 1
1956 // CHECK-NEXT:    [[UCD:%.*]] = alloca i8, align 1
1957 // CHECK-NEXT:    [[SX:%.*]] = alloca i16, align 2
1958 // CHECK-NEXT:    [[SE:%.*]] = alloca i16, align 2
1959 // CHECK-NEXT:    [[SD:%.*]] = alloca i16, align 2
1960 // CHECK-NEXT:    [[USX:%.*]] = alloca i16, align 2
1961 // CHECK-NEXT:    [[USE:%.*]] = alloca i16, align 2
1962 // CHECK-NEXT:    [[USD:%.*]] = alloca i16, align 2
1963 // CHECK-NEXT:    [[IX:%.*]] = alloca i32, align 4
1964 // CHECK-NEXT:    [[IE:%.*]] = alloca i32, align 4
1965 // CHECK-NEXT:    [[ID:%.*]] = alloca i32, align 4
1966 // CHECK-NEXT:    [[UIX:%.*]] = alloca i32, align 4
1967 // CHECK-NEXT:    [[UIE:%.*]] = alloca i32, align 4
1968 // CHECK-NEXT:    [[UID:%.*]] = alloca i32, align 4
1969 // CHECK-NEXT:    [[LX:%.*]] = alloca i64, align 8
1970 // CHECK-NEXT:    [[LE:%.*]] = alloca i64, align 8
1971 // CHECK-NEXT:    [[LD:%.*]] = alloca i64, align 8
1972 // CHECK-NEXT:    [[ULX:%.*]] = alloca i64, align 8
1973 // CHECK-NEXT:    [[ULE:%.*]] = alloca i64, align 8
1974 // CHECK-NEXT:    [[ULD:%.*]] = alloca i64, align 8
1975 // CHECK-NEXT:    [[LLX:%.*]] = alloca i64, align 8
1976 // CHECK-NEXT:    [[LLE:%.*]] = alloca i64, align 8
1977 // CHECK-NEXT:    [[LLD:%.*]] = alloca i64, align 8
1978 // CHECK-NEXT:    [[ULLX:%.*]] = alloca i64, align 8
1979 // CHECK-NEXT:    [[ULLE:%.*]] = alloca i64, align 8
1980 // CHECK-NEXT:    [[ULLD:%.*]] = alloca i64, align 8
1981 // CHECK-NEXT:    [[TMP0:%.*]] = load i8, i8* [[CE]], align 1
1982 // CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP0]] monotonic, align 1
1983 // CHECK-NEXT:    [[TMP2:%.*]] = load i8, i8* [[CE]], align 1
1984 // CHECK-NEXT:    [[TMP3:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP2]] monotonic, align 1
1985 // CHECK-NEXT:    [[TMP4:%.*]] = load i8, i8* [[CE]], align 1
1986 // CHECK-NEXT:    [[TMP5:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP4]] monotonic, align 1
1987 // CHECK-NEXT:    [[TMP6:%.*]] = load i8, i8* [[CE]], align 1
1988 // CHECK-NEXT:    [[TMP7:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP6]] monotonic, align 1
1989 // CHECK-NEXT:    [[TMP8:%.*]] = load i8, i8* [[CE]], align 1
1990 // CHECK-NEXT:    [[TMP9:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP8]] monotonic, align 1
1991 // CHECK-NEXT:    [[TMP10:%.*]] = load i8, i8* [[CE]], align 1
1992 // CHECK-NEXT:    [[TMP11:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP10]] monotonic, align 1
1993 // CHECK-NEXT:    [[TMP12:%.*]] = load i8, i8* [[CE]], align 1
1994 // CHECK-NEXT:    [[TMP13:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP12]] monotonic, align 1
1995 // CHECK-NEXT:    [[TMP14:%.*]] = load i8, i8* [[CE]], align 1
1996 // CHECK-NEXT:    [[TMP15:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP14]] monotonic, align 1
1997 // CHECK-NEXT:    [[TMP16:%.*]] = load i8, i8* [[CE]], align 1
1998 // CHECK-NEXT:    [[TMP17:%.*]] = load i8, i8* [[CD]], align 1
1999 // CHECK-NEXT:    [[TMP18:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP16]], i8 [[TMP17]] monotonic monotonic, align 1
2000 // CHECK-NEXT:    [[TMP19:%.*]] = load i8, i8* [[CE]], align 1
2001 // CHECK-NEXT:    [[TMP20:%.*]] = load i8, i8* [[CD]], align 1
2002 // CHECK-NEXT:    [[TMP21:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP19]], i8 [[TMP20]] monotonic monotonic, align 1
2003 // CHECK-NEXT:    [[TMP22:%.*]] = load i8, i8* [[CE]], align 1
2004 // CHECK-NEXT:    [[TMP23:%.*]] = load i8, i8* [[CD]], align 1
2005 // CHECK-NEXT:    [[TMP24:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP22]], i8 [[TMP23]] monotonic monotonic, align 1
2006 // CHECK-NEXT:    [[TMP25:%.*]] = load i8, i8* [[CE]], align 1
2007 // CHECK-NEXT:    [[TMP26:%.*]] = load i8, i8* [[CD]], align 1
2008 // CHECK-NEXT:    [[TMP27:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP25]], i8 [[TMP26]] monotonic monotonic, align 1
2009 // CHECK-NEXT:    [[TMP28:%.*]] = load i8, i8* [[UCE]], align 1
2010 // CHECK-NEXT:    [[TMP29:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP28]] monotonic, align 1
2011 // CHECK-NEXT:    [[TMP30:%.*]] = load i8, i8* [[UCE]], align 1
2012 // CHECK-NEXT:    [[TMP31:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP30]] monotonic, align 1
2013 // CHECK-NEXT:    [[TMP32:%.*]] = load i8, i8* [[UCE]], align 1
2014 // CHECK-NEXT:    [[TMP33:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP32]] monotonic, align 1
2015 // CHECK-NEXT:    [[TMP34:%.*]] = load i8, i8* [[UCE]], align 1
2016 // CHECK-NEXT:    [[TMP35:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP34]] monotonic, align 1
2017 // CHECK-NEXT:    [[TMP36:%.*]] = load i8, i8* [[UCE]], align 1
2018 // CHECK-NEXT:    [[TMP37:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP36]] monotonic, align 1
2019 // CHECK-NEXT:    [[TMP38:%.*]] = load i8, i8* [[UCE]], align 1
2020 // CHECK-NEXT:    [[TMP39:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP38]] monotonic, align 1
2021 // CHECK-NEXT:    [[TMP40:%.*]] = load i8, i8* [[UCE]], align 1
2022 // CHECK-NEXT:    [[TMP41:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP40]] monotonic, align 1
2023 // CHECK-NEXT:    [[TMP42:%.*]] = load i8, i8* [[UCE]], align 1
2024 // CHECK-NEXT:    [[TMP43:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP42]] monotonic, align 1
2025 // CHECK-NEXT:    [[TMP44:%.*]] = load i8, i8* [[UCE]], align 1
2026 // CHECK-NEXT:    [[TMP45:%.*]] = load i8, i8* [[UCD]], align 1
2027 // CHECK-NEXT:    [[TMP46:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP44]], i8 [[TMP45]] monotonic monotonic, align 1
2028 // CHECK-NEXT:    [[TMP47:%.*]] = load i8, i8* [[UCE]], align 1
2029 // CHECK-NEXT:    [[TMP48:%.*]] = load i8, i8* [[UCD]], align 1
2030 // CHECK-NEXT:    [[TMP49:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP47]], i8 [[TMP48]] monotonic monotonic, align 1
2031 // CHECK-NEXT:    [[TMP50:%.*]] = load i8, i8* [[UCE]], align 1
2032 // CHECK-NEXT:    [[TMP51:%.*]] = load i8, i8* [[UCD]], align 1
2033 // CHECK-NEXT:    [[TMP52:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP50]], i8 [[TMP51]] monotonic monotonic, align 1
2034 // CHECK-NEXT:    [[TMP53:%.*]] = load i8, i8* [[UCE]], align 1
2035 // CHECK-NEXT:    [[TMP54:%.*]] = load i8, i8* [[UCD]], align 1
2036 // CHECK-NEXT:    [[TMP55:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP53]], i8 [[TMP54]] monotonic monotonic, align 1
2037 // CHECK-NEXT:    [[TMP56:%.*]] = load i8, i8* [[CE]], align 1
2038 // CHECK-NEXT:    [[TMP57:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP56]] acq_rel, align 1
2039 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1:[0-9]+]])
2040 // CHECK-NEXT:    [[TMP58:%.*]] = load i8, i8* [[CE]], align 1
2041 // CHECK-NEXT:    [[TMP59:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP58]] acq_rel, align 1
2042 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2043 // CHECK-NEXT:    [[TMP60:%.*]] = load i8, i8* [[CE]], align 1
2044 // CHECK-NEXT:    [[TMP61:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP60]] acq_rel, align 1
2045 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2046 // CHECK-NEXT:    [[TMP62:%.*]] = load i8, i8* [[CE]], align 1
2047 // CHECK-NEXT:    [[TMP63:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP62]] acq_rel, align 1
2048 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2049 // CHECK-NEXT:    [[TMP64:%.*]] = load i8, i8* [[CE]], align 1
2050 // CHECK-NEXT:    [[TMP65:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP64]] acq_rel, align 1
2051 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2052 // CHECK-NEXT:    [[TMP66:%.*]] = load i8, i8* [[CE]], align 1
2053 // CHECK-NEXT:    [[TMP67:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP66]] acq_rel, align 1
2054 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2055 // CHECK-NEXT:    [[TMP68:%.*]] = load i8, i8* [[CE]], align 1
2056 // CHECK-NEXT:    [[TMP69:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP68]] acq_rel, align 1
2057 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2058 // CHECK-NEXT:    [[TMP70:%.*]] = load i8, i8* [[CE]], align 1
2059 // CHECK-NEXT:    [[TMP71:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP70]] acq_rel, align 1
2060 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2061 // CHECK-NEXT:    [[TMP72:%.*]] = load i8, i8* [[CE]], align 1
2062 // CHECK-NEXT:    [[TMP73:%.*]] = load i8, i8* [[CD]], align 1
2063 // CHECK-NEXT:    [[TMP74:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP72]], i8 [[TMP73]] acq_rel acquire, align 1
2064 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2065 // CHECK-NEXT:    [[TMP75:%.*]] = load i8, i8* [[CE]], align 1
2066 // CHECK-NEXT:    [[TMP76:%.*]] = load i8, i8* [[CD]], align 1
2067 // CHECK-NEXT:    [[TMP77:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP75]], i8 [[TMP76]] acq_rel acquire, align 1
2068 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2069 // CHECK-NEXT:    [[TMP78:%.*]] = load i8, i8* [[CE]], align 1
2070 // CHECK-NEXT:    [[TMP79:%.*]] = load i8, i8* [[CD]], align 1
2071 // CHECK-NEXT:    [[TMP80:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP78]], i8 [[TMP79]] acq_rel acquire, align 1
2072 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2073 // CHECK-NEXT:    [[TMP81:%.*]] = load i8, i8* [[CE]], align 1
2074 // CHECK-NEXT:    [[TMP82:%.*]] = load i8, i8* [[CD]], align 1
2075 // CHECK-NEXT:    [[TMP83:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP81]], i8 [[TMP82]] acq_rel acquire, align 1
2076 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2077 // CHECK-NEXT:    [[TMP84:%.*]] = load i8, i8* [[UCE]], align 1
2078 // CHECK-NEXT:    [[TMP85:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP84]] acq_rel, align 1
2079 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2080 // CHECK-NEXT:    [[TMP86:%.*]] = load i8, i8* [[UCE]], align 1
2081 // CHECK-NEXT:    [[TMP87:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP86]] acq_rel, align 1
2082 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2083 // CHECK-NEXT:    [[TMP88:%.*]] = load i8, i8* [[UCE]], align 1
2084 // CHECK-NEXT:    [[TMP89:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP88]] acq_rel, align 1
2085 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2086 // CHECK-NEXT:    [[TMP90:%.*]] = load i8, i8* [[UCE]], align 1
2087 // CHECK-NEXT:    [[TMP91:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP90]] acq_rel, align 1
2088 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2089 // CHECK-NEXT:    [[TMP92:%.*]] = load i8, i8* [[UCE]], align 1
2090 // CHECK-NEXT:    [[TMP93:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP92]] acq_rel, align 1
2091 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2092 // CHECK-NEXT:    [[TMP94:%.*]] = load i8, i8* [[UCE]], align 1
2093 // CHECK-NEXT:    [[TMP95:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP94]] acq_rel, align 1
2094 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2095 // CHECK-NEXT:    [[TMP96:%.*]] = load i8, i8* [[UCE]], align 1
2096 // CHECK-NEXT:    [[TMP97:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP96]] acq_rel, align 1
2097 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2098 // CHECK-NEXT:    [[TMP98:%.*]] = load i8, i8* [[UCE]], align 1
2099 // CHECK-NEXT:    [[TMP99:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP98]] acq_rel, align 1
2100 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2101 // CHECK-NEXT:    [[TMP100:%.*]] = load i8, i8* [[UCE]], align 1
2102 // CHECK-NEXT:    [[TMP101:%.*]] = load i8, i8* [[UCD]], align 1
2103 // CHECK-NEXT:    [[TMP102:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP100]], i8 [[TMP101]] acq_rel acquire, align 1
2104 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2105 // CHECK-NEXT:    [[TMP103:%.*]] = load i8, i8* [[UCE]], align 1
2106 // CHECK-NEXT:    [[TMP104:%.*]] = load i8, i8* [[UCD]], align 1
2107 // CHECK-NEXT:    [[TMP105:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP103]], i8 [[TMP104]] acq_rel acquire, align 1
2108 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2109 // CHECK-NEXT:    [[TMP106:%.*]] = load i8, i8* [[UCE]], align 1
2110 // CHECK-NEXT:    [[TMP107:%.*]] = load i8, i8* [[UCD]], align 1
2111 // CHECK-NEXT:    [[TMP108:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP106]], i8 [[TMP107]] acq_rel acquire, align 1
2112 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2113 // CHECK-NEXT:    [[TMP109:%.*]] = load i8, i8* [[UCE]], align 1
2114 // CHECK-NEXT:    [[TMP110:%.*]] = load i8, i8* [[UCD]], align 1
2115 // CHECK-NEXT:    [[TMP111:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP109]], i8 [[TMP110]] acq_rel acquire, align 1
2116 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2117 // CHECK-NEXT:    [[TMP112:%.*]] = load i8, i8* [[CE]], align 1
2118 // CHECK-NEXT:    [[TMP113:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP112]] acquire, align 1
2119 // CHECK-NEXT:    [[TMP114:%.*]] = load i8, i8* [[CE]], align 1
2120 // CHECK-NEXT:    [[TMP115:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP114]] acquire, align 1
2121 // CHECK-NEXT:    [[TMP116:%.*]] = load i8, i8* [[CE]], align 1
2122 // CHECK-NEXT:    [[TMP117:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP116]] acquire, align 1
2123 // CHECK-NEXT:    [[TMP118:%.*]] = load i8, i8* [[CE]], align 1
2124 // CHECK-NEXT:    [[TMP119:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP118]] acquire, align 1
2125 // CHECK-NEXT:    [[TMP120:%.*]] = load i8, i8* [[CE]], align 1
2126 // CHECK-NEXT:    [[TMP121:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP120]] acquire, align 1
2127 // CHECK-NEXT:    [[TMP122:%.*]] = load i8, i8* [[CE]], align 1
2128 // CHECK-NEXT:    [[TMP123:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP122]] acquire, align 1
2129 // CHECK-NEXT:    [[TMP124:%.*]] = load i8, i8* [[CE]], align 1
2130 // CHECK-NEXT:    [[TMP125:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP124]] acquire, align 1
2131 // CHECK-NEXT:    [[TMP126:%.*]] = load i8, i8* [[CE]], align 1
2132 // CHECK-NEXT:    [[TMP127:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP126]] acquire, align 1
2133 // CHECK-NEXT:    [[TMP128:%.*]] = load i8, i8* [[CE]], align 1
2134 // CHECK-NEXT:    [[TMP129:%.*]] = load i8, i8* [[CD]], align 1
2135 // CHECK-NEXT:    [[TMP130:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP128]], i8 [[TMP129]] acquire acquire, align 1
2136 // CHECK-NEXT:    [[TMP131:%.*]] = load i8, i8* [[CE]], align 1
2137 // CHECK-NEXT:    [[TMP132:%.*]] = load i8, i8* [[CD]], align 1
2138 // CHECK-NEXT:    [[TMP133:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP131]], i8 [[TMP132]] acquire acquire, align 1
2139 // CHECK-NEXT:    [[TMP134:%.*]] = load i8, i8* [[CE]], align 1
2140 // CHECK-NEXT:    [[TMP135:%.*]] = load i8, i8* [[CD]], align 1
2141 // CHECK-NEXT:    [[TMP136:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP134]], i8 [[TMP135]] acquire acquire, align 1
2142 // CHECK-NEXT:    [[TMP137:%.*]] = load i8, i8* [[CE]], align 1
2143 // CHECK-NEXT:    [[TMP138:%.*]] = load i8, i8* [[CD]], align 1
2144 // CHECK-NEXT:    [[TMP139:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP137]], i8 [[TMP138]] acquire acquire, align 1
2145 // CHECK-NEXT:    [[TMP140:%.*]] = load i8, i8* [[UCE]], align 1
2146 // CHECK-NEXT:    [[TMP141:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP140]] acquire, align 1
2147 // CHECK-NEXT:    [[TMP142:%.*]] = load i8, i8* [[UCE]], align 1
2148 // CHECK-NEXT:    [[TMP143:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP142]] acquire, align 1
2149 // CHECK-NEXT:    [[TMP144:%.*]] = load i8, i8* [[UCE]], align 1
2150 // CHECK-NEXT:    [[TMP145:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP144]] acquire, align 1
2151 // CHECK-NEXT:    [[TMP146:%.*]] = load i8, i8* [[UCE]], align 1
2152 // CHECK-NEXT:    [[TMP147:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP146]] acquire, align 1
2153 // CHECK-NEXT:    [[TMP148:%.*]] = load i8, i8* [[UCE]], align 1
2154 // CHECK-NEXT:    [[TMP149:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP148]] acquire, align 1
2155 // CHECK-NEXT:    [[TMP150:%.*]] = load i8, i8* [[UCE]], align 1
2156 // CHECK-NEXT:    [[TMP151:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP150]] acquire, align 1
2157 // CHECK-NEXT:    [[TMP152:%.*]] = load i8, i8* [[UCE]], align 1
2158 // CHECK-NEXT:    [[TMP153:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP152]] acquire, align 1
2159 // CHECK-NEXT:    [[TMP154:%.*]] = load i8, i8* [[UCE]], align 1
2160 // CHECK-NEXT:    [[TMP155:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP154]] acquire, align 1
2161 // CHECK-NEXT:    [[TMP156:%.*]] = load i8, i8* [[UCE]], align 1
2162 // CHECK-NEXT:    [[TMP157:%.*]] = load i8, i8* [[UCD]], align 1
2163 // CHECK-NEXT:    [[TMP158:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP156]], i8 [[TMP157]] acquire acquire, align 1
2164 // CHECK-NEXT:    [[TMP159:%.*]] = load i8, i8* [[UCE]], align 1
2165 // CHECK-NEXT:    [[TMP160:%.*]] = load i8, i8* [[UCD]], align 1
2166 // CHECK-NEXT:    [[TMP161:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP159]], i8 [[TMP160]] acquire acquire, align 1
2167 // CHECK-NEXT:    [[TMP162:%.*]] = load i8, i8* [[UCE]], align 1
2168 // CHECK-NEXT:    [[TMP163:%.*]] = load i8, i8* [[UCD]], align 1
2169 // CHECK-NEXT:    [[TMP164:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP162]], i8 [[TMP163]] acquire acquire, align 1
2170 // CHECK-NEXT:    [[TMP165:%.*]] = load i8, i8* [[UCE]], align 1
2171 // CHECK-NEXT:    [[TMP166:%.*]] = load i8, i8* [[UCD]], align 1
2172 // CHECK-NEXT:    [[TMP167:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP165]], i8 [[TMP166]] acquire acquire, align 1
2173 // CHECK-NEXT:    [[TMP168:%.*]] = load i8, i8* [[CE]], align 1
2174 // CHECK-NEXT:    [[TMP169:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP168]] monotonic, align 1
2175 // CHECK-NEXT:    [[TMP170:%.*]] = load i8, i8* [[CE]], align 1
2176 // CHECK-NEXT:    [[TMP171:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP170]] monotonic, align 1
2177 // CHECK-NEXT:    [[TMP172:%.*]] = load i8, i8* [[CE]], align 1
2178 // CHECK-NEXT:    [[TMP173:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP172]] monotonic, align 1
2179 // CHECK-NEXT:    [[TMP174:%.*]] = load i8, i8* [[CE]], align 1
2180 // CHECK-NEXT:    [[TMP175:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP174]] monotonic, align 1
2181 // CHECK-NEXT:    [[TMP176:%.*]] = load i8, i8* [[CE]], align 1
2182 // CHECK-NEXT:    [[TMP177:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP176]] monotonic, align 1
2183 // CHECK-NEXT:    [[TMP178:%.*]] = load i8, i8* [[CE]], align 1
2184 // CHECK-NEXT:    [[TMP179:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP178]] monotonic, align 1
2185 // CHECK-NEXT:    [[TMP180:%.*]] = load i8, i8* [[CE]], align 1
2186 // CHECK-NEXT:    [[TMP181:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP180]] monotonic, align 1
2187 // CHECK-NEXT:    [[TMP182:%.*]] = load i8, i8* [[CE]], align 1
2188 // CHECK-NEXT:    [[TMP183:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP182]] monotonic, align 1
2189 // CHECK-NEXT:    [[TMP184:%.*]] = load i8, i8* [[CE]], align 1
2190 // CHECK-NEXT:    [[TMP185:%.*]] = load i8, i8* [[CD]], align 1
2191 // CHECK-NEXT:    [[TMP186:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP184]], i8 [[TMP185]] monotonic monotonic, align 1
2192 // CHECK-NEXT:    [[TMP187:%.*]] = load i8, i8* [[CE]], align 1
2193 // CHECK-NEXT:    [[TMP188:%.*]] = load i8, i8* [[CD]], align 1
2194 // CHECK-NEXT:    [[TMP189:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP187]], i8 [[TMP188]] monotonic monotonic, align 1
2195 // CHECK-NEXT:    [[TMP190:%.*]] = load i8, i8* [[CE]], align 1
2196 // CHECK-NEXT:    [[TMP191:%.*]] = load i8, i8* [[CD]], align 1
2197 // CHECK-NEXT:    [[TMP192:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP190]], i8 [[TMP191]] monotonic monotonic, align 1
2198 // CHECK-NEXT:    [[TMP193:%.*]] = load i8, i8* [[CE]], align 1
2199 // CHECK-NEXT:    [[TMP194:%.*]] = load i8, i8* [[CD]], align 1
2200 // CHECK-NEXT:    [[TMP195:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP193]], i8 [[TMP194]] monotonic monotonic, align 1
2201 // CHECK-NEXT:    [[TMP196:%.*]] = load i8, i8* [[UCE]], align 1
2202 // CHECK-NEXT:    [[TMP197:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP196]] monotonic, align 1
2203 // CHECK-NEXT:    [[TMP198:%.*]] = load i8, i8* [[UCE]], align 1
2204 // CHECK-NEXT:    [[TMP199:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP198]] monotonic, align 1
2205 // CHECK-NEXT:    [[TMP200:%.*]] = load i8, i8* [[UCE]], align 1
2206 // CHECK-NEXT:    [[TMP201:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP200]] monotonic, align 1
2207 // CHECK-NEXT:    [[TMP202:%.*]] = load i8, i8* [[UCE]], align 1
2208 // CHECK-NEXT:    [[TMP203:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP202]] monotonic, align 1
2209 // CHECK-NEXT:    [[TMP204:%.*]] = load i8, i8* [[UCE]], align 1
2210 // CHECK-NEXT:    [[TMP205:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP204]] monotonic, align 1
2211 // CHECK-NEXT:    [[TMP206:%.*]] = load i8, i8* [[UCE]], align 1
2212 // CHECK-NEXT:    [[TMP207:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP206]] monotonic, align 1
2213 // CHECK-NEXT:    [[TMP208:%.*]] = load i8, i8* [[UCE]], align 1
2214 // CHECK-NEXT:    [[TMP209:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP208]] monotonic, align 1
2215 // CHECK-NEXT:    [[TMP210:%.*]] = load i8, i8* [[UCE]], align 1
2216 // CHECK-NEXT:    [[TMP211:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP210]] monotonic, align 1
2217 // CHECK-NEXT:    [[TMP212:%.*]] = load i8, i8* [[UCE]], align 1
2218 // CHECK-NEXT:    [[TMP213:%.*]] = load i8, i8* [[UCD]], align 1
2219 // CHECK-NEXT:    [[TMP214:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP212]], i8 [[TMP213]] monotonic monotonic, align 1
2220 // CHECK-NEXT:    [[TMP215:%.*]] = load i8, i8* [[UCE]], align 1
2221 // CHECK-NEXT:    [[TMP216:%.*]] = load i8, i8* [[UCD]], align 1
2222 // CHECK-NEXT:    [[TMP217:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP215]], i8 [[TMP216]] monotonic monotonic, align 1
2223 // CHECK-NEXT:    [[TMP218:%.*]] = load i8, i8* [[UCE]], align 1
2224 // CHECK-NEXT:    [[TMP219:%.*]] = load i8, i8* [[UCD]], align 1
2225 // CHECK-NEXT:    [[TMP220:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP218]], i8 [[TMP219]] monotonic monotonic, align 1
2226 // CHECK-NEXT:    [[TMP221:%.*]] = load i8, i8* [[UCE]], align 1
2227 // CHECK-NEXT:    [[TMP222:%.*]] = load i8, i8* [[UCD]], align 1
2228 // CHECK-NEXT:    [[TMP223:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP221]], i8 [[TMP222]] monotonic monotonic, align 1
2229 // CHECK-NEXT:    [[TMP224:%.*]] = load i8, i8* [[CE]], align 1
2230 // CHECK-NEXT:    [[TMP225:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP224]] release, align 1
2231 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2232 // CHECK-NEXT:    [[TMP226:%.*]] = load i8, i8* [[CE]], align 1
2233 // CHECK-NEXT:    [[TMP227:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP226]] release, align 1
2234 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2235 // CHECK-NEXT:    [[TMP228:%.*]] = load i8, i8* [[CE]], align 1
2236 // CHECK-NEXT:    [[TMP229:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP228]] release, align 1
2237 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2238 // CHECK-NEXT:    [[TMP230:%.*]] = load i8, i8* [[CE]], align 1
2239 // CHECK-NEXT:    [[TMP231:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP230]] release, align 1
2240 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2241 // CHECK-NEXT:    [[TMP232:%.*]] = load i8, i8* [[CE]], align 1
2242 // CHECK-NEXT:    [[TMP233:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP232]] release, align 1
2243 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2244 // CHECK-NEXT:    [[TMP234:%.*]] = load i8, i8* [[CE]], align 1
2245 // CHECK-NEXT:    [[TMP235:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP234]] release, align 1
2246 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2247 // CHECK-NEXT:    [[TMP236:%.*]] = load i8, i8* [[CE]], align 1
2248 // CHECK-NEXT:    [[TMP237:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP236]] release, align 1
2249 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2250 // CHECK-NEXT:    [[TMP238:%.*]] = load i8, i8* [[CE]], align 1
2251 // CHECK-NEXT:    [[TMP239:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP238]] release, align 1
2252 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2253 // CHECK-NEXT:    [[TMP240:%.*]] = load i8, i8* [[CE]], align 1
2254 // CHECK-NEXT:    [[TMP241:%.*]] = load i8, i8* [[CD]], align 1
2255 // CHECK-NEXT:    [[TMP242:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP240]], i8 [[TMP241]] release monotonic, align 1
2256 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2257 // CHECK-NEXT:    [[TMP243:%.*]] = load i8, i8* [[CE]], align 1
2258 // CHECK-NEXT:    [[TMP244:%.*]] = load i8, i8* [[CD]], align 1
2259 // CHECK-NEXT:    [[TMP245:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP243]], i8 [[TMP244]] release monotonic, align 1
2260 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2261 // CHECK-NEXT:    [[TMP246:%.*]] = load i8, i8* [[CE]], align 1
2262 // CHECK-NEXT:    [[TMP247:%.*]] = load i8, i8* [[CD]], align 1
2263 // CHECK-NEXT:    [[TMP248:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP246]], i8 [[TMP247]] release monotonic, align 1
2264 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2265 // CHECK-NEXT:    [[TMP249:%.*]] = load i8, i8* [[CE]], align 1
2266 // CHECK-NEXT:    [[TMP250:%.*]] = load i8, i8* [[CD]], align 1
2267 // CHECK-NEXT:    [[TMP251:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP249]], i8 [[TMP250]] release monotonic, align 1
2268 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2269 // CHECK-NEXT:    [[TMP252:%.*]] = load i8, i8* [[UCE]], align 1
2270 // CHECK-NEXT:    [[TMP253:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP252]] release, align 1
2271 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2272 // CHECK-NEXT:    [[TMP254:%.*]] = load i8, i8* [[UCE]], align 1
2273 // CHECK-NEXT:    [[TMP255:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP254]] release, align 1
2274 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2275 // CHECK-NEXT:    [[TMP256:%.*]] = load i8, i8* [[UCE]], align 1
2276 // CHECK-NEXT:    [[TMP257:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP256]] release, align 1
2277 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2278 // CHECK-NEXT:    [[TMP258:%.*]] = load i8, i8* [[UCE]], align 1
2279 // CHECK-NEXT:    [[TMP259:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP258]] release, align 1
2280 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2281 // CHECK-NEXT:    [[TMP260:%.*]] = load i8, i8* [[UCE]], align 1
2282 // CHECK-NEXT:    [[TMP261:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP260]] release, align 1
2283 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2284 // CHECK-NEXT:    [[TMP262:%.*]] = load i8, i8* [[UCE]], align 1
2285 // CHECK-NEXT:    [[TMP263:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP262]] release, align 1
2286 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2287 // CHECK-NEXT:    [[TMP264:%.*]] = load i8, i8* [[UCE]], align 1
2288 // CHECK-NEXT:    [[TMP265:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP264]] release, align 1
2289 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2290 // CHECK-NEXT:    [[TMP266:%.*]] = load i8, i8* [[UCE]], align 1
2291 // CHECK-NEXT:    [[TMP267:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP266]] release, align 1
2292 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2293 // CHECK-NEXT:    [[TMP268:%.*]] = load i8, i8* [[UCE]], align 1
2294 // CHECK-NEXT:    [[TMP269:%.*]] = load i8, i8* [[UCD]], align 1
2295 // CHECK-NEXT:    [[TMP270:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP268]], i8 [[TMP269]] release monotonic, align 1
2296 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2297 // CHECK-NEXT:    [[TMP271:%.*]] = load i8, i8* [[UCE]], align 1
2298 // CHECK-NEXT:    [[TMP272:%.*]] = load i8, i8* [[UCD]], align 1
2299 // CHECK-NEXT:    [[TMP273:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP271]], i8 [[TMP272]] release monotonic, align 1
2300 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2301 // CHECK-NEXT:    [[TMP274:%.*]] = load i8, i8* [[UCE]], align 1
2302 // CHECK-NEXT:    [[TMP275:%.*]] = load i8, i8* [[UCD]], align 1
2303 // CHECK-NEXT:    [[TMP276:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP274]], i8 [[TMP275]] release monotonic, align 1
2304 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2305 // CHECK-NEXT:    [[TMP277:%.*]] = load i8, i8* [[UCE]], align 1
2306 // CHECK-NEXT:    [[TMP278:%.*]] = load i8, i8* [[UCD]], align 1
2307 // CHECK-NEXT:    [[TMP279:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP277]], i8 [[TMP278]] release monotonic, align 1
2308 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2309 // CHECK-NEXT:    [[TMP280:%.*]] = load i8, i8* [[CE]], align 1
2310 // CHECK-NEXT:    [[TMP281:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP280]] seq_cst, align 1
2311 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2312 // CHECK-NEXT:    [[TMP282:%.*]] = load i8, i8* [[CE]], align 1
2313 // CHECK-NEXT:    [[TMP283:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP282]] seq_cst, align 1
2314 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2315 // CHECK-NEXT:    [[TMP284:%.*]] = load i8, i8* [[CE]], align 1
2316 // CHECK-NEXT:    [[TMP285:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP284]] seq_cst, align 1
2317 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2318 // CHECK-NEXT:    [[TMP286:%.*]] = load i8, i8* [[CE]], align 1
2319 // CHECK-NEXT:    [[TMP287:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP286]] seq_cst, align 1
2320 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2321 // CHECK-NEXT:    [[TMP288:%.*]] = load i8, i8* [[CE]], align 1
2322 // CHECK-NEXT:    [[TMP289:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP288]] seq_cst, align 1
2323 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2324 // CHECK-NEXT:    [[TMP290:%.*]] = load i8, i8* [[CE]], align 1
2325 // CHECK-NEXT:    [[TMP291:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP290]] seq_cst, align 1
2326 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2327 // CHECK-NEXT:    [[TMP292:%.*]] = load i8, i8* [[CE]], align 1
2328 // CHECK-NEXT:    [[TMP293:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP292]] seq_cst, align 1
2329 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2330 // CHECK-NEXT:    [[TMP294:%.*]] = load i8, i8* [[CE]], align 1
2331 // CHECK-NEXT:    [[TMP295:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP294]] seq_cst, align 1
2332 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2333 // CHECK-NEXT:    [[TMP296:%.*]] = load i8, i8* [[CE]], align 1
2334 // CHECK-NEXT:    [[TMP297:%.*]] = load i8, i8* [[CD]], align 1
2335 // CHECK-NEXT:    [[TMP298:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP296]], i8 [[TMP297]] seq_cst seq_cst, align 1
2336 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2337 // CHECK-NEXT:    [[TMP299:%.*]] = load i8, i8* [[CE]], align 1
2338 // CHECK-NEXT:    [[TMP300:%.*]] = load i8, i8* [[CD]], align 1
2339 // CHECK-NEXT:    [[TMP301:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP299]], i8 [[TMP300]] seq_cst seq_cst, align 1
2340 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2341 // CHECK-NEXT:    [[TMP302:%.*]] = load i8, i8* [[CE]], align 1
2342 // CHECK-NEXT:    [[TMP303:%.*]] = load i8, i8* [[CD]], align 1
2343 // CHECK-NEXT:    [[TMP304:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP302]], i8 [[TMP303]] seq_cst seq_cst, align 1
2344 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2345 // CHECK-NEXT:    [[TMP305:%.*]] = load i8, i8* [[CE]], align 1
2346 // CHECK-NEXT:    [[TMP306:%.*]] = load i8, i8* [[CD]], align 1
2347 // CHECK-NEXT:    [[TMP307:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP305]], i8 [[TMP306]] seq_cst seq_cst, align 1
2348 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2349 // CHECK-NEXT:    [[TMP308:%.*]] = load i8, i8* [[UCE]], align 1
2350 // CHECK-NEXT:    [[TMP309:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP308]] seq_cst, align 1
2351 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2352 // CHECK-NEXT:    [[TMP310:%.*]] = load i8, i8* [[UCE]], align 1
2353 // CHECK-NEXT:    [[TMP311:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP310]] seq_cst, align 1
2354 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2355 // CHECK-NEXT:    [[TMP312:%.*]] = load i8, i8* [[UCE]], align 1
2356 // CHECK-NEXT:    [[TMP313:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP312]] seq_cst, align 1
2357 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2358 // CHECK-NEXT:    [[TMP314:%.*]] = load i8, i8* [[UCE]], align 1
2359 // CHECK-NEXT:    [[TMP315:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP314]] seq_cst, align 1
2360 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2361 // CHECK-NEXT:    [[TMP316:%.*]] = load i8, i8* [[UCE]], align 1
2362 // CHECK-NEXT:    [[TMP317:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP316]] seq_cst, align 1
2363 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2364 // CHECK-NEXT:    [[TMP318:%.*]] = load i8, i8* [[UCE]], align 1
2365 // CHECK-NEXT:    [[TMP319:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP318]] seq_cst, align 1
2366 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2367 // CHECK-NEXT:    [[TMP320:%.*]] = load i8, i8* [[UCE]], align 1
2368 // CHECK-NEXT:    [[TMP321:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP320]] seq_cst, align 1
2369 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2370 // CHECK-NEXT:    [[TMP322:%.*]] = load i8, i8* [[UCE]], align 1
2371 // CHECK-NEXT:    [[TMP323:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP322]] seq_cst, align 1
2372 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2373 // CHECK-NEXT:    [[TMP324:%.*]] = load i8, i8* [[UCE]], align 1
2374 // CHECK-NEXT:    [[TMP325:%.*]] = load i8, i8* [[UCD]], align 1
2375 // CHECK-NEXT:    [[TMP326:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP324]], i8 [[TMP325]] seq_cst seq_cst, align 1
2376 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2377 // CHECK-NEXT:    [[TMP327:%.*]] = load i8, i8* [[UCE]], align 1
2378 // CHECK-NEXT:    [[TMP328:%.*]] = load i8, i8* [[UCD]], align 1
2379 // CHECK-NEXT:    [[TMP329:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP327]], i8 [[TMP328]] seq_cst seq_cst, align 1
2380 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2381 // CHECK-NEXT:    [[TMP330:%.*]] = load i8, i8* [[UCE]], align 1
2382 // CHECK-NEXT:    [[TMP331:%.*]] = load i8, i8* [[UCD]], align 1
2383 // CHECK-NEXT:    [[TMP332:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP330]], i8 [[TMP331]] seq_cst seq_cst, align 1
2384 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2385 // CHECK-NEXT:    [[TMP333:%.*]] = load i8, i8* [[UCE]], align 1
2386 // CHECK-NEXT:    [[TMP334:%.*]] = load i8, i8* [[UCD]], align 1
2387 // CHECK-NEXT:    [[TMP335:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP333]], i8 [[TMP334]] seq_cst seq_cst, align 1
2388 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2389 // CHECK-NEXT:    [[TMP336:%.*]] = load i16, i16* [[SE]], align 2
2390 // CHECK-NEXT:    [[TMP337:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP336]] monotonic, align 2
2391 // CHECK-NEXT:    [[TMP338:%.*]] = load i16, i16* [[SE]], align 2
2392 // CHECK-NEXT:    [[TMP339:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP338]] monotonic, align 2
2393 // CHECK-NEXT:    [[TMP340:%.*]] = load i16, i16* [[SE]], align 2
2394 // CHECK-NEXT:    [[TMP341:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP340]] monotonic, align 2
2395 // CHECK-NEXT:    [[TMP342:%.*]] = load i16, i16* [[SE]], align 2
2396 // CHECK-NEXT:    [[TMP343:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP342]] monotonic, align 2
2397 // CHECK-NEXT:    [[TMP344:%.*]] = load i16, i16* [[SE]], align 2
2398 // CHECK-NEXT:    [[TMP345:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP344]] monotonic, align 2
2399 // CHECK-NEXT:    [[TMP346:%.*]] = load i16, i16* [[SE]], align 2
2400 // CHECK-NEXT:    [[TMP347:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP346]] monotonic, align 2
2401 // CHECK-NEXT:    [[TMP348:%.*]] = load i16, i16* [[SE]], align 2
2402 // CHECK-NEXT:    [[TMP349:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP348]] monotonic, align 2
2403 // CHECK-NEXT:    [[TMP350:%.*]] = load i16, i16* [[SE]], align 2
2404 // CHECK-NEXT:    [[TMP351:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP350]] monotonic, align 2
2405 // CHECK-NEXT:    [[TMP352:%.*]] = load i16, i16* [[SE]], align 2
2406 // CHECK-NEXT:    [[TMP353:%.*]] = load i16, i16* [[SD]], align 2
2407 // CHECK-NEXT:    [[TMP354:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP352]], i16 [[TMP353]] monotonic monotonic, align 2
2408 // CHECK-NEXT:    [[TMP355:%.*]] = load i16, i16* [[SE]], align 2
2409 // CHECK-NEXT:    [[TMP356:%.*]] = load i16, i16* [[SD]], align 2
2410 // CHECK-NEXT:    [[TMP357:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP355]], i16 [[TMP356]] monotonic monotonic, align 2
2411 // CHECK-NEXT:    [[TMP358:%.*]] = load i16, i16* [[SE]], align 2
2412 // CHECK-NEXT:    [[TMP359:%.*]] = load i16, i16* [[SD]], align 2
2413 // CHECK-NEXT:    [[TMP360:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP358]], i16 [[TMP359]] monotonic monotonic, align 2
2414 // CHECK-NEXT:    [[TMP361:%.*]] = load i16, i16* [[SE]], align 2
2415 // CHECK-NEXT:    [[TMP362:%.*]] = load i16, i16* [[SD]], align 2
2416 // CHECK-NEXT:    [[TMP363:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP361]], i16 [[TMP362]] monotonic monotonic, align 2
2417 // CHECK-NEXT:    [[TMP364:%.*]] = load i16, i16* [[USE]], align 2
2418 // CHECK-NEXT:    [[TMP365:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP364]] monotonic, align 2
2419 // CHECK-NEXT:    [[TMP366:%.*]] = load i16, i16* [[USE]], align 2
2420 // CHECK-NEXT:    [[TMP367:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP366]] monotonic, align 2
2421 // CHECK-NEXT:    [[TMP368:%.*]] = load i16, i16* [[USE]], align 2
2422 // CHECK-NEXT:    [[TMP369:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP368]] monotonic, align 2
2423 // CHECK-NEXT:    [[TMP370:%.*]] = load i16, i16* [[USE]], align 2
2424 // CHECK-NEXT:    [[TMP371:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP370]] monotonic, align 2
2425 // CHECK-NEXT:    [[TMP372:%.*]] = load i16, i16* [[USE]], align 2
2426 // CHECK-NEXT:    [[TMP373:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP372]] monotonic, align 2
2427 // CHECK-NEXT:    [[TMP374:%.*]] = load i16, i16* [[USE]], align 2
2428 // CHECK-NEXT:    [[TMP375:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP374]] monotonic, align 2
2429 // CHECK-NEXT:    [[TMP376:%.*]] = load i16, i16* [[USE]], align 2
2430 // CHECK-NEXT:    [[TMP377:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP376]] monotonic, align 2
2431 // CHECK-NEXT:    [[TMP378:%.*]] = load i16, i16* [[USE]], align 2
2432 // CHECK-NEXT:    [[TMP379:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP378]] monotonic, align 2
2433 // CHECK-NEXT:    [[TMP380:%.*]] = load i16, i16* [[USE]], align 2
2434 // CHECK-NEXT:    [[TMP381:%.*]] = load i16, i16* [[USD]], align 2
2435 // CHECK-NEXT:    [[TMP382:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP380]], i16 [[TMP381]] monotonic monotonic, align 2
2436 // CHECK-NEXT:    [[TMP383:%.*]] = load i16, i16* [[USE]], align 2
2437 // CHECK-NEXT:    [[TMP384:%.*]] = load i16, i16* [[USD]], align 2
2438 // CHECK-NEXT:    [[TMP385:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP383]], i16 [[TMP384]] monotonic monotonic, align 2
2439 // CHECK-NEXT:    [[TMP386:%.*]] = load i16, i16* [[USE]], align 2
2440 // CHECK-NEXT:    [[TMP387:%.*]] = load i16, i16* [[USD]], align 2
2441 // CHECK-NEXT:    [[TMP388:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP386]], i16 [[TMP387]] monotonic monotonic, align 2
2442 // CHECK-NEXT:    [[TMP389:%.*]] = load i16, i16* [[USE]], align 2
2443 // CHECK-NEXT:    [[TMP390:%.*]] = load i16, i16* [[USD]], align 2
2444 // CHECK-NEXT:    [[TMP391:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP389]], i16 [[TMP390]] monotonic monotonic, align 2
2445 // CHECK-NEXT:    [[TMP392:%.*]] = load i16, i16* [[SE]], align 2
2446 // CHECK-NEXT:    [[TMP393:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP392]] acq_rel, align 2
2447 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2448 // CHECK-NEXT:    [[TMP394:%.*]] = load i16, i16* [[SE]], align 2
2449 // CHECK-NEXT:    [[TMP395:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP394]] acq_rel, align 2
2450 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2451 // CHECK-NEXT:    [[TMP396:%.*]] = load i16, i16* [[SE]], align 2
2452 // CHECK-NEXT:    [[TMP397:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP396]] acq_rel, align 2
2453 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2454 // CHECK-NEXT:    [[TMP398:%.*]] = load i16, i16* [[SE]], align 2
2455 // CHECK-NEXT:    [[TMP399:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP398]] acq_rel, align 2
2456 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2457 // CHECK-NEXT:    [[TMP400:%.*]] = load i16, i16* [[SE]], align 2
2458 // CHECK-NEXT:    [[TMP401:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP400]] acq_rel, align 2
2459 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2460 // CHECK-NEXT:    [[TMP402:%.*]] = load i16, i16* [[SE]], align 2
2461 // CHECK-NEXT:    [[TMP403:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP402]] acq_rel, align 2
2462 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2463 // CHECK-NEXT:    [[TMP404:%.*]] = load i16, i16* [[SE]], align 2
2464 // CHECK-NEXT:    [[TMP405:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP404]] acq_rel, align 2
2465 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2466 // CHECK-NEXT:    [[TMP406:%.*]] = load i16, i16* [[SE]], align 2
2467 // CHECK-NEXT:    [[TMP407:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP406]] acq_rel, align 2
2468 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2469 // CHECK-NEXT:    [[TMP408:%.*]] = load i16, i16* [[SE]], align 2
2470 // CHECK-NEXT:    [[TMP409:%.*]] = load i16, i16* [[SD]], align 2
2471 // CHECK-NEXT:    [[TMP410:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP408]], i16 [[TMP409]] acq_rel acquire, align 2
2472 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2473 // CHECK-NEXT:    [[TMP411:%.*]] = load i16, i16* [[SE]], align 2
2474 // CHECK-NEXT:    [[TMP412:%.*]] = load i16, i16* [[SD]], align 2
2475 // CHECK-NEXT:    [[TMP413:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP411]], i16 [[TMP412]] acq_rel acquire, align 2
2476 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2477 // CHECK-NEXT:    [[TMP414:%.*]] = load i16, i16* [[SE]], align 2
2478 // CHECK-NEXT:    [[TMP415:%.*]] = load i16, i16* [[SD]], align 2
2479 // CHECK-NEXT:    [[TMP416:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP414]], i16 [[TMP415]] acq_rel acquire, align 2
2480 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2481 // CHECK-NEXT:    [[TMP417:%.*]] = load i16, i16* [[SE]], align 2
2482 // CHECK-NEXT:    [[TMP418:%.*]] = load i16, i16* [[SD]], align 2
2483 // CHECK-NEXT:    [[TMP419:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP417]], i16 [[TMP418]] acq_rel acquire, align 2
2484 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2485 // CHECK-NEXT:    [[TMP420:%.*]] = load i16, i16* [[USE]], align 2
2486 // CHECK-NEXT:    [[TMP421:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP420]] acq_rel, align 2
2487 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2488 // CHECK-NEXT:    [[TMP422:%.*]] = load i16, i16* [[USE]], align 2
2489 // CHECK-NEXT:    [[TMP423:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP422]] acq_rel, align 2
2490 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2491 // CHECK-NEXT:    [[TMP424:%.*]] = load i16, i16* [[USE]], align 2
2492 // CHECK-NEXT:    [[TMP425:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP424]] acq_rel, align 2
2493 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2494 // CHECK-NEXT:    [[TMP426:%.*]] = load i16, i16* [[USE]], align 2
2495 // CHECK-NEXT:    [[TMP427:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP426]] acq_rel, align 2
2496 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2497 // CHECK-NEXT:    [[TMP428:%.*]] = load i16, i16* [[USE]], align 2
2498 // CHECK-NEXT:    [[TMP429:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP428]] acq_rel, align 2
2499 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2500 // CHECK-NEXT:    [[TMP430:%.*]] = load i16, i16* [[USE]], align 2
2501 // CHECK-NEXT:    [[TMP431:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP430]] acq_rel, align 2
2502 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2503 // CHECK-NEXT:    [[TMP432:%.*]] = load i16, i16* [[USE]], align 2
2504 // CHECK-NEXT:    [[TMP433:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP432]] acq_rel, align 2
2505 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2506 // CHECK-NEXT:    [[TMP434:%.*]] = load i16, i16* [[USE]], align 2
2507 // CHECK-NEXT:    [[TMP435:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP434]] acq_rel, align 2
2508 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2509 // CHECK-NEXT:    [[TMP436:%.*]] = load i16, i16* [[USE]], align 2
2510 // CHECK-NEXT:    [[TMP437:%.*]] = load i16, i16* [[USD]], align 2
2511 // CHECK-NEXT:    [[TMP438:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP436]], i16 [[TMP437]] acq_rel acquire, align 2
2512 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2513 // CHECK-NEXT:    [[TMP439:%.*]] = load i16, i16* [[USE]], align 2
2514 // CHECK-NEXT:    [[TMP440:%.*]] = load i16, i16* [[USD]], align 2
2515 // CHECK-NEXT:    [[TMP441:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP439]], i16 [[TMP440]] acq_rel acquire, align 2
2516 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2517 // CHECK-NEXT:    [[TMP442:%.*]] = load i16, i16* [[USE]], align 2
2518 // CHECK-NEXT:    [[TMP443:%.*]] = load i16, i16* [[USD]], align 2
2519 // CHECK-NEXT:    [[TMP444:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP442]], i16 [[TMP443]] acq_rel acquire, align 2
2520 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2521 // CHECK-NEXT:    [[TMP445:%.*]] = load i16, i16* [[USE]], align 2
2522 // CHECK-NEXT:    [[TMP446:%.*]] = load i16, i16* [[USD]], align 2
2523 // CHECK-NEXT:    [[TMP447:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP445]], i16 [[TMP446]] acq_rel acquire, align 2
2524 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2525 // CHECK-NEXT:    [[TMP448:%.*]] = load i16, i16* [[SE]], align 2
2526 // CHECK-NEXT:    [[TMP449:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP448]] acquire, align 2
2527 // CHECK-NEXT:    [[TMP450:%.*]] = load i16, i16* [[SE]], align 2
2528 // CHECK-NEXT:    [[TMP451:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP450]] acquire, align 2
2529 // CHECK-NEXT:    [[TMP452:%.*]] = load i16, i16* [[SE]], align 2
2530 // CHECK-NEXT:    [[TMP453:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP452]] acquire, align 2
2531 // CHECK-NEXT:    [[TMP454:%.*]] = load i16, i16* [[SE]], align 2
2532 // CHECK-NEXT:    [[TMP455:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP454]] acquire, align 2
2533 // CHECK-NEXT:    [[TMP456:%.*]] = load i16, i16* [[SE]], align 2
2534 // CHECK-NEXT:    [[TMP457:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP456]] acquire, align 2
2535 // CHECK-NEXT:    [[TMP458:%.*]] = load i16, i16* [[SE]], align 2
2536 // CHECK-NEXT:    [[TMP459:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP458]] acquire, align 2
2537 // CHECK-NEXT:    [[TMP460:%.*]] = load i16, i16* [[SE]], align 2
2538 // CHECK-NEXT:    [[TMP461:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP460]] acquire, align 2
2539 // CHECK-NEXT:    [[TMP462:%.*]] = load i16, i16* [[SE]], align 2
2540 // CHECK-NEXT:    [[TMP463:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP462]] acquire, align 2
2541 // CHECK-NEXT:    [[TMP464:%.*]] = load i16, i16* [[SE]], align 2
2542 // CHECK-NEXT:    [[TMP465:%.*]] = load i16, i16* [[SD]], align 2
2543 // CHECK-NEXT:    [[TMP466:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP464]], i16 [[TMP465]] acquire acquire, align 2
2544 // CHECK-NEXT:    [[TMP467:%.*]] = load i16, i16* [[SE]], align 2
2545 // CHECK-NEXT:    [[TMP468:%.*]] = load i16, i16* [[SD]], align 2
2546 // CHECK-NEXT:    [[TMP469:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP467]], i16 [[TMP468]] acquire acquire, align 2
2547 // CHECK-NEXT:    [[TMP470:%.*]] = load i16, i16* [[SE]], align 2
2548 // CHECK-NEXT:    [[TMP471:%.*]] = load i16, i16* [[SD]], align 2
2549 // CHECK-NEXT:    [[TMP472:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP470]], i16 [[TMP471]] acquire acquire, align 2
2550 // CHECK-NEXT:    [[TMP473:%.*]] = load i16, i16* [[SE]], align 2
2551 // CHECK-NEXT:    [[TMP474:%.*]] = load i16, i16* [[SD]], align 2
2552 // CHECK-NEXT:    [[TMP475:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP473]], i16 [[TMP474]] acquire acquire, align 2
2553 // CHECK-NEXT:    [[TMP476:%.*]] = load i16, i16* [[USE]], align 2
2554 // CHECK-NEXT:    [[TMP477:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP476]] acquire, align 2
2555 // CHECK-NEXT:    [[TMP478:%.*]] = load i16, i16* [[USE]], align 2
2556 // CHECK-NEXT:    [[TMP479:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP478]] acquire, align 2
2557 // CHECK-NEXT:    [[TMP480:%.*]] = load i16, i16* [[USE]], align 2
2558 // CHECK-NEXT:    [[TMP481:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP480]] acquire, align 2
2559 // CHECK-NEXT:    [[TMP482:%.*]] = load i16, i16* [[USE]], align 2
2560 // CHECK-NEXT:    [[TMP483:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP482]] acquire, align 2
2561 // CHECK-NEXT:    [[TMP484:%.*]] = load i16, i16* [[USE]], align 2
2562 // CHECK-NEXT:    [[TMP485:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP484]] acquire, align 2
2563 // CHECK-NEXT:    [[TMP486:%.*]] = load i16, i16* [[USE]], align 2
2564 // CHECK-NEXT:    [[TMP487:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP486]] acquire, align 2
2565 // CHECK-NEXT:    [[TMP488:%.*]] = load i16, i16* [[USE]], align 2
2566 // CHECK-NEXT:    [[TMP489:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP488]] acquire, align 2
2567 // CHECK-NEXT:    [[TMP490:%.*]] = load i16, i16* [[USE]], align 2
2568 // CHECK-NEXT:    [[TMP491:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP490]] acquire, align 2
2569 // CHECK-NEXT:    [[TMP492:%.*]] = load i16, i16* [[USE]], align 2
2570 // CHECK-NEXT:    [[TMP493:%.*]] = load i16, i16* [[USD]], align 2
2571 // CHECK-NEXT:    [[TMP494:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP492]], i16 [[TMP493]] acquire acquire, align 2
2572 // CHECK-NEXT:    [[TMP495:%.*]] = load i16, i16* [[USE]], align 2
2573 // CHECK-NEXT:    [[TMP496:%.*]] = load i16, i16* [[USD]], align 2
2574 // CHECK-NEXT:    [[TMP497:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP495]], i16 [[TMP496]] acquire acquire, align 2
2575 // CHECK-NEXT:    [[TMP498:%.*]] = load i16, i16* [[USE]], align 2
2576 // CHECK-NEXT:    [[TMP499:%.*]] = load i16, i16* [[USD]], align 2
2577 // CHECK-NEXT:    [[TMP500:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP498]], i16 [[TMP499]] acquire acquire, align 2
2578 // CHECK-NEXT:    [[TMP501:%.*]] = load i16, i16* [[USE]], align 2
2579 // CHECK-NEXT:    [[TMP502:%.*]] = load i16, i16* [[USD]], align 2
2580 // CHECK-NEXT:    [[TMP503:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP501]], i16 [[TMP502]] acquire acquire, align 2
2581 // CHECK-NEXT:    [[TMP504:%.*]] = load i16, i16* [[SE]], align 2
2582 // CHECK-NEXT:    [[TMP505:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP504]] monotonic, align 2
2583 // CHECK-NEXT:    [[TMP506:%.*]] = load i16, i16* [[SE]], align 2
2584 // CHECK-NEXT:    [[TMP507:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP506]] monotonic, align 2
2585 // CHECK-NEXT:    [[TMP508:%.*]] = load i16, i16* [[SE]], align 2
2586 // CHECK-NEXT:    [[TMP509:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP508]] monotonic, align 2
2587 // CHECK-NEXT:    [[TMP510:%.*]] = load i16, i16* [[SE]], align 2
2588 // CHECK-NEXT:    [[TMP511:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP510]] monotonic, align 2
2589 // CHECK-NEXT:    [[TMP512:%.*]] = load i16, i16* [[SE]], align 2
2590 // CHECK-NEXT:    [[TMP513:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP512]] monotonic, align 2
2591 // CHECK-NEXT:    [[TMP514:%.*]] = load i16, i16* [[SE]], align 2
2592 // CHECK-NEXT:    [[TMP515:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP514]] monotonic, align 2
2593 // CHECK-NEXT:    [[TMP516:%.*]] = load i16, i16* [[SE]], align 2
2594 // CHECK-NEXT:    [[TMP517:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP516]] monotonic, align 2
2595 // CHECK-NEXT:    [[TMP518:%.*]] = load i16, i16* [[SE]], align 2
2596 // CHECK-NEXT:    [[TMP519:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP518]] monotonic, align 2
2597 // CHECK-NEXT:    [[TMP520:%.*]] = load i16, i16* [[SE]], align 2
2598 // CHECK-NEXT:    [[TMP521:%.*]] = load i16, i16* [[SD]], align 2
2599 // CHECK-NEXT:    [[TMP522:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP520]], i16 [[TMP521]] monotonic monotonic, align 2
2600 // CHECK-NEXT:    [[TMP523:%.*]] = load i16, i16* [[SE]], align 2
2601 // CHECK-NEXT:    [[TMP524:%.*]] = load i16, i16* [[SD]], align 2
2602 // CHECK-NEXT:    [[TMP525:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP523]], i16 [[TMP524]] monotonic monotonic, align 2
2603 // CHECK-NEXT:    [[TMP526:%.*]] = load i16, i16* [[SE]], align 2
2604 // CHECK-NEXT:    [[TMP527:%.*]] = load i16, i16* [[SD]], align 2
2605 // CHECK-NEXT:    [[TMP528:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP526]], i16 [[TMP527]] monotonic monotonic, align 2
2606 // CHECK-NEXT:    [[TMP529:%.*]] = load i16, i16* [[SE]], align 2
2607 // CHECK-NEXT:    [[TMP530:%.*]] = load i16, i16* [[SD]], align 2
2608 // CHECK-NEXT:    [[TMP531:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP529]], i16 [[TMP530]] monotonic monotonic, align 2
2609 // CHECK-NEXT:    [[TMP532:%.*]] = load i16, i16* [[USE]], align 2
2610 // CHECK-NEXT:    [[TMP533:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP532]] monotonic, align 2
2611 // CHECK-NEXT:    [[TMP534:%.*]] = load i16, i16* [[USE]], align 2
2612 // CHECK-NEXT:    [[TMP535:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP534]] monotonic, align 2
2613 // CHECK-NEXT:    [[TMP536:%.*]] = load i16, i16* [[USE]], align 2
2614 // CHECK-NEXT:    [[TMP537:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP536]] monotonic, align 2
2615 // CHECK-NEXT:    [[TMP538:%.*]] = load i16, i16* [[USE]], align 2
2616 // CHECK-NEXT:    [[TMP539:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP538]] monotonic, align 2
2617 // CHECK-NEXT:    [[TMP540:%.*]] = load i16, i16* [[USE]], align 2
2618 // CHECK-NEXT:    [[TMP541:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP540]] monotonic, align 2
2619 // CHECK-NEXT:    [[TMP542:%.*]] = load i16, i16* [[USE]], align 2
2620 // CHECK-NEXT:    [[TMP543:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP542]] monotonic, align 2
2621 // CHECK-NEXT:    [[TMP544:%.*]] = load i16, i16* [[USE]], align 2
2622 // CHECK-NEXT:    [[TMP545:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP544]] monotonic, align 2
2623 // CHECK-NEXT:    [[TMP546:%.*]] = load i16, i16* [[USE]], align 2
2624 // CHECK-NEXT:    [[TMP547:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP546]] monotonic, align 2
2625 // CHECK-NEXT:    [[TMP548:%.*]] = load i16, i16* [[USE]], align 2
2626 // CHECK-NEXT:    [[TMP549:%.*]] = load i16, i16* [[USD]], align 2
2627 // CHECK-NEXT:    [[TMP550:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP548]], i16 [[TMP549]] monotonic monotonic, align 2
2628 // CHECK-NEXT:    [[TMP551:%.*]] = load i16, i16* [[USE]], align 2
2629 // CHECK-NEXT:    [[TMP552:%.*]] = load i16, i16* [[USD]], align 2
2630 // CHECK-NEXT:    [[TMP553:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP551]], i16 [[TMP552]] monotonic monotonic, align 2
2631 // CHECK-NEXT:    [[TMP554:%.*]] = load i16, i16* [[USE]], align 2
2632 // CHECK-NEXT:    [[TMP555:%.*]] = load i16, i16* [[USD]], align 2
2633 // CHECK-NEXT:    [[TMP556:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP554]], i16 [[TMP555]] monotonic monotonic, align 2
2634 // CHECK-NEXT:    [[TMP557:%.*]] = load i16, i16* [[USE]], align 2
2635 // CHECK-NEXT:    [[TMP558:%.*]] = load i16, i16* [[USD]], align 2
2636 // CHECK-NEXT:    [[TMP559:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP557]], i16 [[TMP558]] monotonic monotonic, align 2
2637 // CHECK-NEXT:    [[TMP560:%.*]] = load i16, i16* [[SE]], align 2
2638 // CHECK-NEXT:    [[TMP561:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP560]] release, align 2
2639 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2640 // CHECK-NEXT:    [[TMP562:%.*]] = load i16, i16* [[SE]], align 2
2641 // CHECK-NEXT:    [[TMP563:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP562]] release, align 2
2642 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2643 // CHECK-NEXT:    [[TMP564:%.*]] = load i16, i16* [[SE]], align 2
2644 // CHECK-NEXT:    [[TMP565:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP564]] release, align 2
2645 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2646 // CHECK-NEXT:    [[TMP566:%.*]] = load i16, i16* [[SE]], align 2
2647 // CHECK-NEXT:    [[TMP567:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP566]] release, align 2
2648 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2649 // CHECK-NEXT:    [[TMP568:%.*]] = load i16, i16* [[SE]], align 2
2650 // CHECK-NEXT:    [[TMP569:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP568]] release, align 2
2651 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2652 // CHECK-NEXT:    [[TMP570:%.*]] = load i16, i16* [[SE]], align 2
2653 // CHECK-NEXT:    [[TMP571:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP570]] release, align 2
2654 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2655 // CHECK-NEXT:    [[TMP572:%.*]] = load i16, i16* [[SE]], align 2
2656 // CHECK-NEXT:    [[TMP573:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP572]] release, align 2
2657 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2658 // CHECK-NEXT:    [[TMP574:%.*]] = load i16, i16* [[SE]], align 2
2659 // CHECK-NEXT:    [[TMP575:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP574]] release, align 2
2660 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2661 // CHECK-NEXT:    [[TMP576:%.*]] = load i16, i16* [[SE]], align 2
2662 // CHECK-NEXT:    [[TMP577:%.*]] = load i16, i16* [[SD]], align 2
2663 // CHECK-NEXT:    [[TMP578:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP576]], i16 [[TMP577]] release monotonic, align 2
2664 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2665 // CHECK-NEXT:    [[TMP579:%.*]] = load i16, i16* [[SE]], align 2
2666 // CHECK-NEXT:    [[TMP580:%.*]] = load i16, i16* [[SD]], align 2
2667 // CHECK-NEXT:    [[TMP581:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP579]], i16 [[TMP580]] release monotonic, align 2
2668 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2669 // CHECK-NEXT:    [[TMP582:%.*]] = load i16, i16* [[SE]], align 2
2670 // CHECK-NEXT:    [[TMP583:%.*]] = load i16, i16* [[SD]], align 2
2671 // CHECK-NEXT:    [[TMP584:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP582]], i16 [[TMP583]] release monotonic, align 2
2672 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2673 // CHECK-NEXT:    [[TMP585:%.*]] = load i16, i16* [[SE]], align 2
2674 // CHECK-NEXT:    [[TMP586:%.*]] = load i16, i16* [[SD]], align 2
2675 // CHECK-NEXT:    [[TMP587:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP585]], i16 [[TMP586]] release monotonic, align 2
2676 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2677 // CHECK-NEXT:    [[TMP588:%.*]] = load i16, i16* [[USE]], align 2
2678 // CHECK-NEXT:    [[TMP589:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP588]] release, align 2
2679 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2680 // CHECK-NEXT:    [[TMP590:%.*]] = load i16, i16* [[USE]], align 2
2681 // CHECK-NEXT:    [[TMP591:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP590]] release, align 2
2682 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2683 // CHECK-NEXT:    [[TMP592:%.*]] = load i16, i16* [[USE]], align 2
2684 // CHECK-NEXT:    [[TMP593:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP592]] release, align 2
2685 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2686 // CHECK-NEXT:    [[TMP594:%.*]] = load i16, i16* [[USE]], align 2
2687 // CHECK-NEXT:    [[TMP595:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP594]] release, align 2
2688 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2689 // CHECK-NEXT:    [[TMP596:%.*]] = load i16, i16* [[USE]], align 2
2690 // CHECK-NEXT:    [[TMP597:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP596]] release, align 2
2691 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2692 // CHECK-NEXT:    [[TMP598:%.*]] = load i16, i16* [[USE]], align 2
2693 // CHECK-NEXT:    [[TMP599:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP598]] release, align 2
2694 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2695 // CHECK-NEXT:    [[TMP600:%.*]] = load i16, i16* [[USE]], align 2
2696 // CHECK-NEXT:    [[TMP601:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP600]] release, align 2
2697 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2698 // CHECK-NEXT:    [[TMP602:%.*]] = load i16, i16* [[USE]], align 2
2699 // CHECK-NEXT:    [[TMP603:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP602]] release, align 2
2700 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2701 // CHECK-NEXT:    [[TMP604:%.*]] = load i16, i16* [[USE]], align 2
2702 // CHECK-NEXT:    [[TMP605:%.*]] = load i16, i16* [[USD]], align 2
2703 // CHECK-NEXT:    [[TMP606:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP604]], i16 [[TMP605]] release monotonic, align 2
2704 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2705 // CHECK-NEXT:    [[TMP607:%.*]] = load i16, i16* [[USE]], align 2
2706 // CHECK-NEXT:    [[TMP608:%.*]] = load i16, i16* [[USD]], align 2
2707 // CHECK-NEXT:    [[TMP609:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP607]], i16 [[TMP608]] release monotonic, align 2
2708 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2709 // CHECK-NEXT:    [[TMP610:%.*]] = load i16, i16* [[USE]], align 2
2710 // CHECK-NEXT:    [[TMP611:%.*]] = load i16, i16* [[USD]], align 2
2711 // CHECK-NEXT:    [[TMP612:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP610]], i16 [[TMP611]] release monotonic, align 2
2712 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2713 // CHECK-NEXT:    [[TMP613:%.*]] = load i16, i16* [[USE]], align 2
2714 // CHECK-NEXT:    [[TMP614:%.*]] = load i16, i16* [[USD]], align 2
2715 // CHECK-NEXT:    [[TMP615:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP613]], i16 [[TMP614]] release monotonic, align 2
2716 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2717 // CHECK-NEXT:    [[TMP616:%.*]] = load i16, i16* [[SE]], align 2
2718 // CHECK-NEXT:    [[TMP617:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP616]] seq_cst, align 2
2719 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2720 // CHECK-NEXT:    [[TMP618:%.*]] = load i16, i16* [[SE]], align 2
2721 // CHECK-NEXT:    [[TMP619:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP618]] seq_cst, align 2
2722 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2723 // CHECK-NEXT:    [[TMP620:%.*]] = load i16, i16* [[SE]], align 2
2724 // CHECK-NEXT:    [[TMP621:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP620]] seq_cst, align 2
2725 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2726 // CHECK-NEXT:    [[TMP622:%.*]] = load i16, i16* [[SE]], align 2
2727 // CHECK-NEXT:    [[TMP623:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP622]] seq_cst, align 2
2728 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2729 // CHECK-NEXT:    [[TMP624:%.*]] = load i16, i16* [[SE]], align 2
2730 // CHECK-NEXT:    [[TMP625:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP624]] seq_cst, align 2
2731 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2732 // CHECK-NEXT:    [[TMP626:%.*]] = load i16, i16* [[SE]], align 2
2733 // CHECK-NEXT:    [[TMP627:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP626]] seq_cst, align 2
2734 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2735 // CHECK-NEXT:    [[TMP628:%.*]] = load i16, i16* [[SE]], align 2
2736 // CHECK-NEXT:    [[TMP629:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP628]] seq_cst, align 2
2737 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2738 // CHECK-NEXT:    [[TMP630:%.*]] = load i16, i16* [[SE]], align 2
2739 // CHECK-NEXT:    [[TMP631:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP630]] seq_cst, align 2
2740 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2741 // CHECK-NEXT:    [[TMP632:%.*]] = load i16, i16* [[SE]], align 2
2742 // CHECK-NEXT:    [[TMP633:%.*]] = load i16, i16* [[SD]], align 2
2743 // CHECK-NEXT:    [[TMP634:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP632]], i16 [[TMP633]] seq_cst seq_cst, align 2
2744 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2745 // CHECK-NEXT:    [[TMP635:%.*]] = load i16, i16* [[SE]], align 2
2746 // CHECK-NEXT:    [[TMP636:%.*]] = load i16, i16* [[SD]], align 2
2747 // CHECK-NEXT:    [[TMP637:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP635]], i16 [[TMP636]] seq_cst seq_cst, align 2
2748 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2749 // CHECK-NEXT:    [[TMP638:%.*]] = load i16, i16* [[SE]], align 2
2750 // CHECK-NEXT:    [[TMP639:%.*]] = load i16, i16* [[SD]], align 2
2751 // CHECK-NEXT:    [[TMP640:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP638]], i16 [[TMP639]] seq_cst seq_cst, align 2
2752 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2753 // CHECK-NEXT:    [[TMP641:%.*]] = load i16, i16* [[SE]], align 2
2754 // CHECK-NEXT:    [[TMP642:%.*]] = load i16, i16* [[SD]], align 2
2755 // CHECK-NEXT:    [[TMP643:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP641]], i16 [[TMP642]] seq_cst seq_cst, align 2
2756 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2757 // CHECK-NEXT:    [[TMP644:%.*]] = load i16, i16* [[USE]], align 2
2758 // CHECK-NEXT:    [[TMP645:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP644]] seq_cst, align 2
2759 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2760 // CHECK-NEXT:    [[TMP646:%.*]] = load i16, i16* [[USE]], align 2
2761 // CHECK-NEXT:    [[TMP647:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP646]] seq_cst, align 2
2762 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2763 // CHECK-NEXT:    [[TMP648:%.*]] = load i16, i16* [[USE]], align 2
2764 // CHECK-NEXT:    [[TMP649:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP648]] seq_cst, align 2
2765 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2766 // CHECK-NEXT:    [[TMP650:%.*]] = load i16, i16* [[USE]], align 2
2767 // CHECK-NEXT:    [[TMP651:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP650]] seq_cst, align 2
2768 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2769 // CHECK-NEXT:    [[TMP652:%.*]] = load i16, i16* [[USE]], align 2
2770 // CHECK-NEXT:    [[TMP653:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP652]] seq_cst, align 2
2771 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2772 // CHECK-NEXT:    [[TMP654:%.*]] = load i16, i16* [[USE]], align 2
2773 // CHECK-NEXT:    [[TMP655:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP654]] seq_cst, align 2
2774 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2775 // CHECK-NEXT:    [[TMP656:%.*]] = load i16, i16* [[USE]], align 2
2776 // CHECK-NEXT:    [[TMP657:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP656]] seq_cst, align 2
2777 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2778 // CHECK-NEXT:    [[TMP658:%.*]] = load i16, i16* [[USE]], align 2
2779 // CHECK-NEXT:    [[TMP659:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP658]] seq_cst, align 2
2780 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2781 // CHECK-NEXT:    [[TMP660:%.*]] = load i16, i16* [[USE]], align 2
2782 // CHECK-NEXT:    [[TMP661:%.*]] = load i16, i16* [[USD]], align 2
2783 // CHECK-NEXT:    [[TMP662:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP660]], i16 [[TMP661]] seq_cst seq_cst, align 2
2784 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2785 // CHECK-NEXT:    [[TMP663:%.*]] = load i16, i16* [[USE]], align 2
2786 // CHECK-NEXT:    [[TMP664:%.*]] = load i16, i16* [[USD]], align 2
2787 // CHECK-NEXT:    [[TMP665:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP663]], i16 [[TMP664]] seq_cst seq_cst, align 2
2788 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2789 // CHECK-NEXT:    [[TMP666:%.*]] = load i16, i16* [[USE]], align 2
2790 // CHECK-NEXT:    [[TMP667:%.*]] = load i16, i16* [[USD]], align 2
2791 // CHECK-NEXT:    [[TMP668:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP666]], i16 [[TMP667]] seq_cst seq_cst, align 2
2792 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2793 // CHECK-NEXT:    [[TMP669:%.*]] = load i16, i16* [[USE]], align 2
2794 // CHECK-NEXT:    [[TMP670:%.*]] = load i16, i16* [[USD]], align 2
2795 // CHECK-NEXT:    [[TMP671:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP669]], i16 [[TMP670]] seq_cst seq_cst, align 2
2796 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2797 // CHECK-NEXT:    [[TMP672:%.*]] = load i32, i32* [[IE]], align 4
2798 // CHECK-NEXT:    [[TMP673:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP672]] monotonic, align 4
2799 // CHECK-NEXT:    [[TMP674:%.*]] = load i32, i32* [[IE]], align 4
2800 // CHECK-NEXT:    [[TMP675:%.*]] = atomicrmw umax i32* [[IX]], i32 [[TMP674]] monotonic, align 4
2801 // CHECK-NEXT:    [[TMP676:%.*]] = load i32, i32* [[IE]], align 4
2802 // CHECK-NEXT:    [[TMP677:%.*]] = atomicrmw umax i32* [[IX]], i32 [[TMP676]] monotonic, align 4
2803 // CHECK-NEXT:    [[TMP678:%.*]] = load i32, i32* [[IE]], align 4
2804 // CHECK-NEXT:    [[TMP679:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP678]] monotonic, align 4
2805 // CHECK-NEXT:    [[TMP680:%.*]] = load i32, i32* [[IE]], align 4
2806 // CHECK-NEXT:    [[TMP681:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP680]] monotonic, align 4
2807 // CHECK-NEXT:    [[TMP682:%.*]] = load i32, i32* [[IE]], align 4
2808 // CHECK-NEXT:    [[TMP683:%.*]] = atomicrmw umax i32* [[IX]], i32 [[TMP682]] monotonic, align 4
2809 // CHECK-NEXT:    [[TMP684:%.*]] = load i32, i32* [[IE]], align 4
2810 // CHECK-NEXT:    [[TMP685:%.*]] = atomicrmw umax i32* [[IX]], i32 [[TMP684]] monotonic, align 4
2811 // CHECK-NEXT:    [[TMP686:%.*]] = load i32, i32* [[IE]], align 4
2812 // CHECK-NEXT:    [[TMP687:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP686]] monotonic, align 4
2813 // CHECK-NEXT:    [[TMP688:%.*]] = load i32, i32* [[IE]], align 4
2814 // CHECK-NEXT:    [[TMP689:%.*]] = load i32, i32* [[ID]], align 4
2815 // CHECK-NEXT:    [[TMP690:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP688]], i32 [[TMP689]] monotonic monotonic, align 4
2816 // CHECK-NEXT:    [[TMP691:%.*]] = load i32, i32* [[IE]], align 4
2817 // CHECK-NEXT:    [[TMP692:%.*]] = load i32, i32* [[ID]], align 4
2818 // CHECK-NEXT:    [[TMP693:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP691]], i32 [[TMP692]] monotonic monotonic, align 4
2819 // CHECK-NEXT:    [[TMP694:%.*]] = load i32, i32* [[IE]], align 4
2820 // CHECK-NEXT:    [[TMP695:%.*]] = load i32, i32* [[ID]], align 4
2821 // CHECK-NEXT:    [[TMP696:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP694]], i32 [[TMP695]] monotonic monotonic, align 4
2822 // CHECK-NEXT:    [[TMP697:%.*]] = load i32, i32* [[IE]], align 4
2823 // CHECK-NEXT:    [[TMP698:%.*]] = load i32, i32* [[ID]], align 4
2824 // CHECK-NEXT:    [[TMP699:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP697]], i32 [[TMP698]] monotonic monotonic, align 4
2825 // CHECK-NEXT:    [[TMP700:%.*]] = load i32, i32* [[UIE]], align 4
2826 // CHECK-NEXT:    [[TMP701:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP700]] monotonic, align 4
2827 // CHECK-NEXT:    [[TMP702:%.*]] = load i32, i32* [[UIE]], align 4
2828 // CHECK-NEXT:    [[TMP703:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP702]] monotonic, align 4
2829 // CHECK-NEXT:    [[TMP704:%.*]] = load i32, i32* [[UIE]], align 4
2830 // CHECK-NEXT:    [[TMP705:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP704]] monotonic, align 4
2831 // CHECK-NEXT:    [[TMP706:%.*]] = load i32, i32* [[UIE]], align 4
2832 // CHECK-NEXT:    [[TMP707:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP706]] monotonic, align 4
2833 // CHECK-NEXT:    [[TMP708:%.*]] = load i32, i32* [[UIE]], align 4
2834 // CHECK-NEXT:    [[TMP709:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP708]] monotonic, align 4
2835 // CHECK-NEXT:    [[TMP710:%.*]] = load i32, i32* [[UIE]], align 4
2836 // CHECK-NEXT:    [[TMP711:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP710]] monotonic, align 4
2837 // CHECK-NEXT:    [[TMP712:%.*]] = load i32, i32* [[UIE]], align 4
2838 // CHECK-NEXT:    [[TMP713:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP712]] monotonic, align 4
2839 // CHECK-NEXT:    [[TMP714:%.*]] = load i32, i32* [[UIE]], align 4
2840 // CHECK-NEXT:    [[TMP715:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP714]] monotonic, align 4
2841 // CHECK-NEXT:    [[TMP716:%.*]] = load i32, i32* [[UIE]], align 4
2842 // CHECK-NEXT:    [[TMP717:%.*]] = load i32, i32* [[UID]], align 4
2843 // CHECK-NEXT:    [[TMP718:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP716]], i32 [[TMP717]] monotonic monotonic, align 4
2844 // CHECK-NEXT:    [[TMP719:%.*]] = load i32, i32* [[UIE]], align 4
2845 // CHECK-NEXT:    [[TMP720:%.*]] = load i32, i32* [[UID]], align 4
2846 // CHECK-NEXT:    [[TMP721:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP719]], i32 [[TMP720]] monotonic monotonic, align 4
2847 // CHECK-NEXT:    [[TMP722:%.*]] = load i32, i32* [[UIE]], align 4
2848 // CHECK-NEXT:    [[TMP723:%.*]] = load i32, i32* [[UID]], align 4
2849 // CHECK-NEXT:    [[TMP724:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP722]], i32 [[TMP723]] monotonic monotonic, align 4
2850 // CHECK-NEXT:    [[TMP725:%.*]] = load i32, i32* [[UIE]], align 4
2851 // CHECK-NEXT:    [[TMP726:%.*]] = load i32, i32* [[UID]], align 4
2852 // CHECK-NEXT:    [[TMP727:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP725]], i32 [[TMP726]] monotonic monotonic, align 4
2853 // CHECK-NEXT:    [[TMP728:%.*]] = load i32, i32* [[IE]], align 4
2854 // CHECK-NEXT:    [[TMP729:%.*]] = atomicrmw min i32* [[IX]], i32 [[TMP728]] acq_rel, align 4
2855 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2856 // CHECK-NEXT:    [[TMP730:%.*]] = load i32, i32* [[IE]], align 4
2857 // CHECK-NEXT:    [[TMP731:%.*]] = atomicrmw max i32* [[IX]], i32 [[TMP730]] acq_rel, align 4
2858 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2859 // CHECK-NEXT:    [[TMP732:%.*]] = load i32, i32* [[IE]], align 4
2860 // CHECK-NEXT:    [[TMP733:%.*]] = atomicrmw max i32* [[IX]], i32 [[TMP732]] acq_rel, align 4
2861 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2862 // CHECK-NEXT:    [[TMP734:%.*]] = load i32, i32* [[IE]], align 4
2863 // CHECK-NEXT:    [[TMP735:%.*]] = atomicrmw min i32* [[IX]], i32 [[TMP734]] acq_rel, align 4
2864 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2865 // CHECK-NEXT:    [[TMP736:%.*]] = load i32, i32* [[IE]], align 4
2866 // CHECK-NEXT:    [[TMP737:%.*]] = atomicrmw min i32* [[IX]], i32 [[TMP736]] acq_rel, align 4
2867 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2868 // CHECK-NEXT:    [[TMP738:%.*]] = load i32, i32* [[IE]], align 4
2869 // CHECK-NEXT:    [[TMP739:%.*]] = atomicrmw max i32* [[IX]], i32 [[TMP738]] acq_rel, align 4
2870 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2871 // CHECK-NEXT:    [[TMP740:%.*]] = load i32, i32* [[IE]], align 4
2872 // CHECK-NEXT:    [[TMP741:%.*]] = atomicrmw max i32* [[IX]], i32 [[TMP740]] acq_rel, align 4
2873 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2874 // CHECK-NEXT:    [[TMP742:%.*]] = load i32, i32* [[IE]], align 4
2875 // CHECK-NEXT:    [[TMP743:%.*]] = atomicrmw min i32* [[IX]], i32 [[TMP742]] acq_rel, align 4
2876 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2877 // CHECK-NEXT:    [[TMP744:%.*]] = load i32, i32* [[IE]], align 4
2878 // CHECK-NEXT:    [[TMP745:%.*]] = load i32, i32* [[ID]], align 4
2879 // CHECK-NEXT:    [[TMP746:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP744]], i32 [[TMP745]] acq_rel acquire, align 4
2880 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2881 // CHECK-NEXT:    [[TMP747:%.*]] = load i32, i32* [[IE]], align 4
2882 // CHECK-NEXT:    [[TMP748:%.*]] = load i32, i32* [[ID]], align 4
2883 // CHECK-NEXT:    [[TMP749:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP747]], i32 [[TMP748]] acq_rel acquire, align 4
2884 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2885 // CHECK-NEXT:    [[TMP750:%.*]] = load i32, i32* [[IE]], align 4
2886 // CHECK-NEXT:    [[TMP751:%.*]] = load i32, i32* [[ID]], align 4
2887 // CHECK-NEXT:    [[TMP752:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP750]], i32 [[TMP751]] acq_rel acquire, align 4
2888 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2889 // CHECK-NEXT:    [[TMP753:%.*]] = load i32, i32* [[IE]], align 4
2890 // CHECK-NEXT:    [[TMP754:%.*]] = load i32, i32* [[ID]], align 4
2891 // CHECK-NEXT:    [[TMP755:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP753]], i32 [[TMP754]] acq_rel acquire, align 4
2892 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2893 // CHECK-NEXT:    [[TMP756:%.*]] = load i32, i32* [[UIE]], align 4
2894 // CHECK-NEXT:    [[TMP757:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP756]] acq_rel, align 4
2895 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2896 // CHECK-NEXT:    [[TMP758:%.*]] = load i32, i32* [[UIE]], align 4
2897 // CHECK-NEXT:    [[TMP759:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP758]] acq_rel, align 4
2898 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2899 // CHECK-NEXT:    [[TMP760:%.*]] = load i32, i32* [[UIE]], align 4
2900 // CHECK-NEXT:    [[TMP761:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP760]] acq_rel, align 4
2901 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2902 // CHECK-NEXT:    [[TMP762:%.*]] = load i32, i32* [[UIE]], align 4
2903 // CHECK-NEXT:    [[TMP763:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP762]] acq_rel, align 4
2904 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2905 // CHECK-NEXT:    [[TMP764:%.*]] = load i32, i32* [[UIE]], align 4
2906 // CHECK-NEXT:    [[TMP765:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP764]] acq_rel, align 4
2907 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2908 // CHECK-NEXT:    [[TMP766:%.*]] = load i32, i32* [[UIE]], align 4
2909 // CHECK-NEXT:    [[TMP767:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP766]] acq_rel, align 4
2910 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2911 // CHECK-NEXT:    [[TMP768:%.*]] = load i32, i32* [[UIE]], align 4
2912 // CHECK-NEXT:    [[TMP769:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP768]] acq_rel, align 4
2913 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2914 // CHECK-NEXT:    [[TMP770:%.*]] = load i32, i32* [[UIE]], align 4
2915 // CHECK-NEXT:    [[TMP771:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP770]] acq_rel, align 4
2916 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2917 // CHECK-NEXT:    [[TMP772:%.*]] = load i32, i32* [[UIE]], align 4
2918 // CHECK-NEXT:    [[TMP773:%.*]] = load i32, i32* [[UID]], align 4
2919 // CHECK-NEXT:    [[TMP774:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP772]], i32 [[TMP773]] acq_rel acquire, align 4
2920 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2921 // CHECK-NEXT:    [[TMP775:%.*]] = load i32, i32* [[UIE]], align 4
2922 // CHECK-NEXT:    [[TMP776:%.*]] = load i32, i32* [[UID]], align 4
2923 // CHECK-NEXT:    [[TMP777:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP775]], i32 [[TMP776]] acq_rel acquire, align 4
2924 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2925 // CHECK-NEXT:    [[TMP778:%.*]] = load i32, i32* [[UIE]], align 4
2926 // CHECK-NEXT:    [[TMP779:%.*]] = load i32, i32* [[UID]], align 4
2927 // CHECK-NEXT:    [[TMP780:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP778]], i32 [[TMP779]] acq_rel acquire, align 4
2928 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2929 // CHECK-NEXT:    [[TMP781:%.*]] = load i32, i32* [[UIE]], align 4
2930 // CHECK-NEXT:    [[TMP782:%.*]] = load i32, i32* [[UID]], align 4
2931 // CHECK-NEXT:    [[TMP783:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP781]], i32 [[TMP782]] acq_rel acquire, align 4
2932 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
2933 // CHECK-NEXT:    [[TMP784:%.*]] = load i32, i32* [[IE]], align 4
2934 // CHECK-NEXT:    [[TMP785:%.*]] = atomicrmw min i32* [[IX]], i32 [[TMP784]] acquire, align 4
2935 // CHECK-NEXT:    [[TMP786:%.*]] = load i32, i32* [[IE]], align 4
2936 // CHECK-NEXT:    [[TMP787:%.*]] = atomicrmw max i32* [[IX]], i32 [[TMP786]] acquire, align 4
2937 // CHECK-NEXT:    [[TMP788:%.*]] = load i32, i32* [[IE]], align 4
2938 // CHECK-NEXT:    [[TMP789:%.*]] = atomicrmw max i32* [[IX]], i32 [[TMP788]] acquire, align 4
2939 // CHECK-NEXT:    [[TMP790:%.*]] = load i32, i32* [[IE]], align 4
2940 // CHECK-NEXT:    [[TMP791:%.*]] = atomicrmw min i32* [[IX]], i32 [[TMP790]] acquire, align 4
2941 // CHECK-NEXT:    [[TMP792:%.*]] = load i32, i32* [[IE]], align 4
2942 // CHECK-NEXT:    [[TMP793:%.*]] = atomicrmw min i32* [[IX]], i32 [[TMP792]] acquire, align 4
2943 // CHECK-NEXT:    [[TMP794:%.*]] = load i32, i32* [[IE]], align 4
2944 // CHECK-NEXT:    [[TMP795:%.*]] = atomicrmw max i32* [[IX]], i32 [[TMP794]] acquire, align 4
2945 // CHECK-NEXT:    [[TMP796:%.*]] = load i32, i32* [[IE]], align 4
2946 // CHECK-NEXT:    [[TMP797:%.*]] = atomicrmw max i32* [[IX]], i32 [[TMP796]] acquire, align 4
2947 // CHECK-NEXT:    [[TMP798:%.*]] = load i32, i32* [[IE]], align 4
2948 // CHECK-NEXT:    [[TMP799:%.*]] = atomicrmw min i32* [[IX]], i32 [[TMP798]] acquire, align 4
2949 // CHECK-NEXT:    [[TMP800:%.*]] = load i32, i32* [[IE]], align 4
2950 // CHECK-NEXT:    [[TMP801:%.*]] = load i32, i32* [[ID]], align 4
2951 // CHECK-NEXT:    [[TMP802:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP800]], i32 [[TMP801]] acquire acquire, align 4
2952 // CHECK-NEXT:    [[TMP803:%.*]] = load i32, i32* [[IE]], align 4
2953 // CHECK-NEXT:    [[TMP804:%.*]] = load i32, i32* [[ID]], align 4
2954 // CHECK-NEXT:    [[TMP805:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP803]], i32 [[TMP804]] acquire acquire, align 4
2955 // CHECK-NEXT:    [[TMP806:%.*]] = load i32, i32* [[IE]], align 4
2956 // CHECK-NEXT:    [[TMP807:%.*]] = load i32, i32* [[ID]], align 4
2957 // CHECK-NEXT:    [[TMP808:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP806]], i32 [[TMP807]] acquire acquire, align 4
2958 // CHECK-NEXT:    [[TMP809:%.*]] = load i32, i32* [[IE]], align 4
2959 // CHECK-NEXT:    [[TMP810:%.*]] = load i32, i32* [[ID]], align 4
2960 // CHECK-NEXT:    [[TMP811:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP809]], i32 [[TMP810]] acquire acquire, align 4
2961 // CHECK-NEXT:    [[TMP812:%.*]] = load i32, i32* [[UIE]], align 4
2962 // CHECK-NEXT:    [[TMP813:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP812]] acquire, align 4
2963 // CHECK-NEXT:    [[TMP814:%.*]] = load i32, i32* [[UIE]], align 4
2964 // CHECK-NEXT:    [[TMP815:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP814]] acquire, align 4
2965 // CHECK-NEXT:    [[TMP816:%.*]] = load i32, i32* [[UIE]], align 4
2966 // CHECK-NEXT:    [[TMP817:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP816]] acquire, align 4
2967 // CHECK-NEXT:    [[TMP818:%.*]] = load i32, i32* [[UIE]], align 4
2968 // CHECK-NEXT:    [[TMP819:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP818]] acquire, align 4
2969 // CHECK-NEXT:    [[TMP820:%.*]] = load i32, i32* [[UIE]], align 4
2970 // CHECK-NEXT:    [[TMP821:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP820]] acquire, align 4
2971 // CHECK-NEXT:    [[TMP822:%.*]] = load i32, i32* [[UIE]], align 4
2972 // CHECK-NEXT:    [[TMP823:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP822]] acquire, align 4
2973 // CHECK-NEXT:    [[TMP824:%.*]] = load i32, i32* [[UIE]], align 4
2974 // CHECK-NEXT:    [[TMP825:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP824]] acquire, align 4
2975 // CHECK-NEXT:    [[TMP826:%.*]] = load i32, i32* [[UIE]], align 4
2976 // CHECK-NEXT:    [[TMP827:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP826]] acquire, align 4
2977 // CHECK-NEXT:    [[TMP828:%.*]] = load i32, i32* [[UIE]], align 4
2978 // CHECK-NEXT:    [[TMP829:%.*]] = load i32, i32* [[UID]], align 4
2979 // CHECK-NEXT:    [[TMP830:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP828]], i32 [[TMP829]] acquire acquire, align 4
2980 // CHECK-NEXT:    [[TMP831:%.*]] = load i32, i32* [[UIE]], align 4
2981 // CHECK-NEXT:    [[TMP832:%.*]] = load i32, i32* [[UID]], align 4
2982 // CHECK-NEXT:    [[TMP833:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP831]], i32 [[TMP832]] acquire acquire, align 4
2983 // CHECK-NEXT:    [[TMP834:%.*]] = load i32, i32* [[UIE]], align 4
2984 // CHECK-NEXT:    [[TMP835:%.*]] = load i32, i32* [[UID]], align 4
2985 // CHECK-NEXT:    [[TMP836:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP834]], i32 [[TMP835]] acquire acquire, align 4
2986 // CHECK-NEXT:    [[TMP837:%.*]] = load i32, i32* [[UIE]], align 4
2987 // CHECK-NEXT:    [[TMP838:%.*]] = load i32, i32* [[UID]], align 4
2988 // CHECK-NEXT:    [[TMP839:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP837]], i32 [[TMP838]] acquire acquire, align 4
2989 // CHECK-NEXT:    [[TMP840:%.*]] = load i32, i32* [[IE]], align 4
2990 // CHECK-NEXT:    [[TMP841:%.*]] = atomicrmw min i32* [[IX]], i32 [[TMP840]] monotonic, align 4
2991 // CHECK-NEXT:    [[TMP842:%.*]] = load i32, i32* [[IE]], align 4
2992 // CHECK-NEXT:    [[TMP843:%.*]] = atomicrmw max i32* [[IX]], i32 [[TMP842]] monotonic, align 4
2993 // CHECK-NEXT:    [[TMP844:%.*]] = load i32, i32* [[IE]], align 4
2994 // CHECK-NEXT:    [[TMP845:%.*]] = atomicrmw max i32* [[IX]], i32 [[TMP844]] monotonic, align 4
2995 // CHECK-NEXT:    [[TMP846:%.*]] = load i32, i32* [[IE]], align 4
2996 // CHECK-NEXT:    [[TMP847:%.*]] = atomicrmw min i32* [[IX]], i32 [[TMP846]] monotonic, align 4
2997 // CHECK-NEXT:    [[TMP848:%.*]] = load i32, i32* [[IE]], align 4
2998 // CHECK-NEXT:    [[TMP849:%.*]] = atomicrmw min i32* [[IX]], i32 [[TMP848]] monotonic, align 4
2999 // CHECK-NEXT:    [[TMP850:%.*]] = load i32, i32* [[IE]], align 4
3000 // CHECK-NEXT:    [[TMP851:%.*]] = atomicrmw max i32* [[IX]], i32 [[TMP850]] monotonic, align 4
3001 // CHECK-NEXT:    [[TMP852:%.*]] = load i32, i32* [[IE]], align 4
3002 // CHECK-NEXT:    [[TMP853:%.*]] = atomicrmw max i32* [[IX]], i32 [[TMP852]] monotonic, align 4
3003 // CHECK-NEXT:    [[TMP854:%.*]] = load i32, i32* [[IE]], align 4
3004 // CHECK-NEXT:    [[TMP855:%.*]] = atomicrmw min i32* [[IX]], i32 [[TMP854]] monotonic, align 4
3005 // CHECK-NEXT:    [[TMP856:%.*]] = load i32, i32* [[IE]], align 4
3006 // CHECK-NEXT:    [[TMP857:%.*]] = load i32, i32* [[ID]], align 4
3007 // CHECK-NEXT:    [[TMP858:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP856]], i32 [[TMP857]] monotonic monotonic, align 4
3008 // CHECK-NEXT:    [[TMP859:%.*]] = load i32, i32* [[IE]], align 4
3009 // CHECK-NEXT:    [[TMP860:%.*]] = load i32, i32* [[ID]], align 4
3010 // CHECK-NEXT:    [[TMP861:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP859]], i32 [[TMP860]] monotonic monotonic, align 4
3011 // CHECK-NEXT:    [[TMP862:%.*]] = load i32, i32* [[IE]], align 4
3012 // CHECK-NEXT:    [[TMP863:%.*]] = load i32, i32* [[ID]], align 4
3013 // CHECK-NEXT:    [[TMP864:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP862]], i32 [[TMP863]] monotonic monotonic, align 4
3014 // CHECK-NEXT:    [[TMP865:%.*]] = load i32, i32* [[IE]], align 4
3015 // CHECK-NEXT:    [[TMP866:%.*]] = load i32, i32* [[ID]], align 4
3016 // CHECK-NEXT:    [[TMP867:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP865]], i32 [[TMP866]] monotonic monotonic, align 4
3017 // CHECK-NEXT:    [[TMP868:%.*]] = load i32, i32* [[UIE]], align 4
3018 // CHECK-NEXT:    [[TMP869:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP868]] monotonic, align 4
3019 // CHECK-NEXT:    [[TMP870:%.*]] = load i32, i32* [[UIE]], align 4
3020 // CHECK-NEXT:    [[TMP871:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP870]] monotonic, align 4
3021 // CHECK-NEXT:    [[TMP872:%.*]] = load i32, i32* [[UIE]], align 4
3022 // CHECK-NEXT:    [[TMP873:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP872]] monotonic, align 4
3023 // CHECK-NEXT:    [[TMP874:%.*]] = load i32, i32* [[UIE]], align 4
3024 // CHECK-NEXT:    [[TMP875:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP874]] monotonic, align 4
3025 // CHECK-NEXT:    [[TMP876:%.*]] = load i32, i32* [[UIE]], align 4
3026 // CHECK-NEXT:    [[TMP877:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP876]] monotonic, align 4
3027 // CHECK-NEXT:    [[TMP878:%.*]] = load i32, i32* [[UIE]], align 4
3028 // CHECK-NEXT:    [[TMP879:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP878]] monotonic, align 4
3029 // CHECK-NEXT:    [[TMP880:%.*]] = load i32, i32* [[UIE]], align 4
3030 // CHECK-NEXT:    [[TMP881:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP880]] monotonic, align 4
3031 // CHECK-NEXT:    [[TMP882:%.*]] = load i32, i32* [[UIE]], align 4
3032 // CHECK-NEXT:    [[TMP883:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP882]] monotonic, align 4
3033 // CHECK-NEXT:    [[TMP884:%.*]] = load i32, i32* [[UIE]], align 4
3034 // CHECK-NEXT:    [[TMP885:%.*]] = load i32, i32* [[UID]], align 4
3035 // CHECK-NEXT:    [[TMP886:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP884]], i32 [[TMP885]] monotonic monotonic, align 4
3036 // CHECK-NEXT:    [[TMP887:%.*]] = load i32, i32* [[UIE]], align 4
3037 // CHECK-NEXT:    [[TMP888:%.*]] = load i32, i32* [[UID]], align 4
3038 // CHECK-NEXT:    [[TMP889:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP887]], i32 [[TMP888]] monotonic monotonic, align 4
3039 // CHECK-NEXT:    [[TMP890:%.*]] = load i32, i32* [[UIE]], align 4
3040 // CHECK-NEXT:    [[TMP891:%.*]] = load i32, i32* [[UID]], align 4
3041 // CHECK-NEXT:    [[TMP892:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP890]], i32 [[TMP891]] monotonic monotonic, align 4
3042 // CHECK-NEXT:    [[TMP893:%.*]] = load i32, i32* [[UIE]], align 4
3043 // CHECK-NEXT:    [[TMP894:%.*]] = load i32, i32* [[UID]], align 4
3044 // CHECK-NEXT:    [[TMP895:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP893]], i32 [[TMP894]] monotonic monotonic, align 4
3045 // CHECK-NEXT:    [[TMP896:%.*]] = load i32, i32* [[IE]], align 4
3046 // CHECK-NEXT:    [[TMP897:%.*]] = atomicrmw min i32* [[IX]], i32 [[TMP896]] release, align 4
3047 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3048 // CHECK-NEXT:    [[TMP898:%.*]] = load i32, i32* [[IE]], align 4
3049 // CHECK-NEXT:    [[TMP899:%.*]] = atomicrmw max i32* [[IX]], i32 [[TMP898]] release, align 4
3050 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3051 // CHECK-NEXT:    [[TMP900:%.*]] = load i32, i32* [[IE]], align 4
3052 // CHECK-NEXT:    [[TMP901:%.*]] = atomicrmw max i32* [[IX]], i32 [[TMP900]] release, align 4
3053 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3054 // CHECK-NEXT:    [[TMP902:%.*]] = load i32, i32* [[IE]], align 4
3055 // CHECK-NEXT:    [[TMP903:%.*]] = atomicrmw min i32* [[IX]], i32 [[TMP902]] release, align 4
3056 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3057 // CHECK-NEXT:    [[TMP904:%.*]] = load i32, i32* [[IE]], align 4
3058 // CHECK-NEXT:    [[TMP905:%.*]] = atomicrmw min i32* [[IX]], i32 [[TMP904]] release, align 4
3059 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3060 // CHECK-NEXT:    [[TMP906:%.*]] = load i32, i32* [[IE]], align 4
3061 // CHECK-NEXT:    [[TMP907:%.*]] = atomicrmw max i32* [[IX]], i32 [[TMP906]] release, align 4
3062 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3063 // CHECK-NEXT:    [[TMP908:%.*]] = load i32, i32* [[IE]], align 4
3064 // CHECK-NEXT:    [[TMP909:%.*]] = atomicrmw max i32* [[IX]], i32 [[TMP908]] release, align 4
3065 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3066 // CHECK-NEXT:    [[TMP910:%.*]] = load i32, i32* [[IE]], align 4
3067 // CHECK-NEXT:    [[TMP911:%.*]] = atomicrmw min i32* [[IX]], i32 [[TMP910]] release, align 4
3068 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3069 // CHECK-NEXT:    [[TMP912:%.*]] = load i32, i32* [[IE]], align 4
3070 // CHECK-NEXT:    [[TMP913:%.*]] = load i32, i32* [[ID]], align 4
3071 // CHECK-NEXT:    [[TMP914:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP912]], i32 [[TMP913]] release monotonic, align 4
3072 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3073 // CHECK-NEXT:    [[TMP915:%.*]] = load i32, i32* [[IE]], align 4
3074 // CHECK-NEXT:    [[TMP916:%.*]] = load i32, i32* [[ID]], align 4
3075 // CHECK-NEXT:    [[TMP917:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP915]], i32 [[TMP916]] release monotonic, align 4
3076 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3077 // CHECK-NEXT:    [[TMP918:%.*]] = load i32, i32* [[IE]], align 4
3078 // CHECK-NEXT:    [[TMP919:%.*]] = load i32, i32* [[ID]], align 4
3079 // CHECK-NEXT:    [[TMP920:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP918]], i32 [[TMP919]] release monotonic, align 4
3080 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3081 // CHECK-NEXT:    [[TMP921:%.*]] = load i32, i32* [[IE]], align 4
3082 // CHECK-NEXT:    [[TMP922:%.*]] = load i32, i32* [[ID]], align 4
3083 // CHECK-NEXT:    [[TMP923:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP921]], i32 [[TMP922]] release monotonic, align 4
3084 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3085 // CHECK-NEXT:    [[TMP924:%.*]] = load i32, i32* [[UIE]], align 4
3086 // CHECK-NEXT:    [[TMP925:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP924]] release, align 4
3087 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3088 // CHECK-NEXT:    [[TMP926:%.*]] = load i32, i32* [[UIE]], align 4
3089 // CHECK-NEXT:    [[TMP927:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP926]] release, align 4
3090 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3091 // CHECK-NEXT:    [[TMP928:%.*]] = load i32, i32* [[UIE]], align 4
3092 // CHECK-NEXT:    [[TMP929:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP928]] release, align 4
3093 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3094 // CHECK-NEXT:    [[TMP930:%.*]] = load i32, i32* [[UIE]], align 4
3095 // CHECK-NEXT:    [[TMP931:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP930]] release, align 4
3096 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3097 // CHECK-NEXT:    [[TMP932:%.*]] = load i32, i32* [[UIE]], align 4
3098 // CHECK-NEXT:    [[TMP933:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP932]] release, align 4
3099 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3100 // CHECK-NEXT:    [[TMP934:%.*]] = load i32, i32* [[UIE]], align 4
3101 // CHECK-NEXT:    [[TMP935:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP934]] release, align 4
3102 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3103 // CHECK-NEXT:    [[TMP936:%.*]] = load i32, i32* [[UIE]], align 4
3104 // CHECK-NEXT:    [[TMP937:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP936]] release, align 4
3105 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3106 // CHECK-NEXT:    [[TMP938:%.*]] = load i32, i32* [[UIE]], align 4
3107 // CHECK-NEXT:    [[TMP939:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP938]] release, align 4
3108 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3109 // CHECK-NEXT:    [[TMP940:%.*]] = load i32, i32* [[UIE]], align 4
3110 // CHECK-NEXT:    [[TMP941:%.*]] = load i32, i32* [[UID]], align 4
3111 // CHECK-NEXT:    [[TMP942:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP940]], i32 [[TMP941]] release monotonic, align 4
3112 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3113 // CHECK-NEXT:    [[TMP943:%.*]] = load i32, i32* [[UIE]], align 4
3114 // CHECK-NEXT:    [[TMP944:%.*]] = load i32, i32* [[UID]], align 4
3115 // CHECK-NEXT:    [[TMP945:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP943]], i32 [[TMP944]] release monotonic, align 4
3116 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3117 // CHECK-NEXT:    [[TMP946:%.*]] = load i32, i32* [[UIE]], align 4
3118 // CHECK-NEXT:    [[TMP947:%.*]] = load i32, i32* [[UID]], align 4
3119 // CHECK-NEXT:    [[TMP948:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP946]], i32 [[TMP947]] release monotonic, align 4
3120 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3121 // CHECK-NEXT:    [[TMP949:%.*]] = load i32, i32* [[UIE]], align 4
3122 // CHECK-NEXT:    [[TMP950:%.*]] = load i32, i32* [[UID]], align 4
3123 // CHECK-NEXT:    [[TMP951:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP949]], i32 [[TMP950]] release monotonic, align 4
3124 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3125 // CHECK-NEXT:    [[TMP952:%.*]] = load i32, i32* [[IE]], align 4
3126 // CHECK-NEXT:    [[TMP953:%.*]] = atomicrmw min i32* [[IX]], i32 [[TMP952]] seq_cst, align 4
3127 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3128 // CHECK-NEXT:    [[TMP954:%.*]] = load i32, i32* [[IE]], align 4
3129 // CHECK-NEXT:    [[TMP955:%.*]] = atomicrmw max i32* [[IX]], i32 [[TMP954]] seq_cst, align 4
3130 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3131 // CHECK-NEXT:    [[TMP956:%.*]] = load i32, i32* [[IE]], align 4
3132 // CHECK-NEXT:    [[TMP957:%.*]] = atomicrmw max i32* [[IX]], i32 [[TMP956]] seq_cst, align 4
3133 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3134 // CHECK-NEXT:    [[TMP958:%.*]] = load i32, i32* [[IE]], align 4
3135 // CHECK-NEXT:    [[TMP959:%.*]] = atomicrmw min i32* [[IX]], i32 [[TMP958]] seq_cst, align 4
3136 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3137 // CHECK-NEXT:    [[TMP960:%.*]] = load i32, i32* [[IE]], align 4
3138 // CHECK-NEXT:    [[TMP961:%.*]] = atomicrmw min i32* [[IX]], i32 [[TMP960]] seq_cst, align 4
3139 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3140 // CHECK-NEXT:    [[TMP962:%.*]] = load i32, i32* [[IE]], align 4
3141 // CHECK-NEXT:    [[TMP963:%.*]] = atomicrmw max i32* [[IX]], i32 [[TMP962]] seq_cst, align 4
3142 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3143 // CHECK-NEXT:    [[TMP964:%.*]] = load i32, i32* [[IE]], align 4
3144 // CHECK-NEXT:    [[TMP965:%.*]] = atomicrmw max i32* [[IX]], i32 [[TMP964]] seq_cst, align 4
3145 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3146 // CHECK-NEXT:    [[TMP966:%.*]] = load i32, i32* [[IE]], align 4
3147 // CHECK-NEXT:    [[TMP967:%.*]] = atomicrmw min i32* [[IX]], i32 [[TMP966]] seq_cst, align 4
3148 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3149 // CHECK-NEXT:    [[TMP968:%.*]] = load i32, i32* [[IE]], align 4
3150 // CHECK-NEXT:    [[TMP969:%.*]] = load i32, i32* [[ID]], align 4
3151 // CHECK-NEXT:    [[TMP970:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP968]], i32 [[TMP969]] seq_cst seq_cst, align 4
3152 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3153 // CHECK-NEXT:    [[TMP971:%.*]] = load i32, i32* [[IE]], align 4
3154 // CHECK-NEXT:    [[TMP972:%.*]] = load i32, i32* [[ID]], align 4
3155 // CHECK-NEXT:    [[TMP973:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP971]], i32 [[TMP972]] seq_cst seq_cst, align 4
3156 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3157 // CHECK-NEXT:    [[TMP974:%.*]] = load i32, i32* [[IE]], align 4
3158 // CHECK-NEXT:    [[TMP975:%.*]] = load i32, i32* [[ID]], align 4
3159 // CHECK-NEXT:    [[TMP976:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP974]], i32 [[TMP975]] seq_cst seq_cst, align 4
3160 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3161 // CHECK-NEXT:    [[TMP977:%.*]] = load i32, i32* [[IE]], align 4
3162 // CHECK-NEXT:    [[TMP978:%.*]] = load i32, i32* [[ID]], align 4
3163 // CHECK-NEXT:    [[TMP979:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP977]], i32 [[TMP978]] seq_cst seq_cst, align 4
3164 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3165 // CHECK-NEXT:    [[TMP980:%.*]] = load i32, i32* [[UIE]], align 4
3166 // CHECK-NEXT:    [[TMP981:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP980]] seq_cst, align 4
3167 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3168 // CHECK-NEXT:    [[TMP982:%.*]] = load i32, i32* [[UIE]], align 4
3169 // CHECK-NEXT:    [[TMP983:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP982]] seq_cst, align 4
3170 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3171 // CHECK-NEXT:    [[TMP984:%.*]] = load i32, i32* [[UIE]], align 4
3172 // CHECK-NEXT:    [[TMP985:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP984]] seq_cst, align 4
3173 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3174 // CHECK-NEXT:    [[TMP986:%.*]] = load i32, i32* [[UIE]], align 4
3175 // CHECK-NEXT:    [[TMP987:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP986]] seq_cst, align 4
3176 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3177 // CHECK-NEXT:    [[TMP988:%.*]] = load i32, i32* [[UIE]], align 4
3178 // CHECK-NEXT:    [[TMP989:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP988]] seq_cst, align 4
3179 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3180 // CHECK-NEXT:    [[TMP990:%.*]] = load i32, i32* [[UIE]], align 4
3181 // CHECK-NEXT:    [[TMP991:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP990]] seq_cst, align 4
3182 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3183 // CHECK-NEXT:    [[TMP992:%.*]] = load i32, i32* [[UIE]], align 4
3184 // CHECK-NEXT:    [[TMP993:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP992]] seq_cst, align 4
3185 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3186 // CHECK-NEXT:    [[TMP994:%.*]] = load i32, i32* [[UIE]], align 4
3187 // CHECK-NEXT:    [[TMP995:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP994]] seq_cst, align 4
3188 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3189 // CHECK-NEXT:    [[TMP996:%.*]] = load i32, i32* [[UIE]], align 4
3190 // CHECK-NEXT:    [[TMP997:%.*]] = load i32, i32* [[UID]], align 4
3191 // CHECK-NEXT:    [[TMP998:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP996]], i32 [[TMP997]] seq_cst seq_cst, align 4
3192 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3193 // CHECK-NEXT:    [[TMP999:%.*]] = load i32, i32* [[UIE]], align 4
3194 // CHECK-NEXT:    [[TMP1000:%.*]] = load i32, i32* [[UID]], align 4
3195 // CHECK-NEXT:    [[TMP1001:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP999]], i32 [[TMP1000]] seq_cst seq_cst, align 4
3196 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3197 // CHECK-NEXT:    [[TMP1002:%.*]] = load i32, i32* [[UIE]], align 4
3198 // CHECK-NEXT:    [[TMP1003:%.*]] = load i32, i32* [[UID]], align 4
3199 // CHECK-NEXT:    [[TMP1004:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP1002]], i32 [[TMP1003]] seq_cst seq_cst, align 4
3200 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3201 // CHECK-NEXT:    [[TMP1005:%.*]] = load i32, i32* [[UIE]], align 4
3202 // CHECK-NEXT:    [[TMP1006:%.*]] = load i32, i32* [[UID]], align 4
3203 // CHECK-NEXT:    [[TMP1007:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP1005]], i32 [[TMP1006]] seq_cst seq_cst, align 4
3204 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3205 // CHECK-NEXT:    [[TMP1008:%.*]] = load i64, i64* [[LE]], align 8
3206 // CHECK-NEXT:    [[TMP1009:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP1008]] monotonic, align 8
3207 // CHECK-NEXT:    [[TMP1010:%.*]] = load i64, i64* [[LE]], align 8
3208 // CHECK-NEXT:    [[TMP1011:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP1010]] monotonic, align 8
3209 // CHECK-NEXT:    [[TMP1012:%.*]] = load i64, i64* [[LE]], align 8
3210 // CHECK-NEXT:    [[TMP1013:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP1012]] monotonic, align 8
3211 // CHECK-NEXT:    [[TMP1014:%.*]] = load i64, i64* [[LE]], align 8
3212 // CHECK-NEXT:    [[TMP1015:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP1014]] monotonic, align 8
3213 // CHECK-NEXT:    [[TMP1016:%.*]] = load i64, i64* [[LE]], align 8
3214 // CHECK-NEXT:    [[TMP1017:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP1016]] monotonic, align 8
3215 // CHECK-NEXT:    [[TMP1018:%.*]] = load i64, i64* [[LE]], align 8
3216 // CHECK-NEXT:    [[TMP1019:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP1018]] monotonic, align 8
3217 // CHECK-NEXT:    [[TMP1020:%.*]] = load i64, i64* [[LE]], align 8
3218 // CHECK-NEXT:    [[TMP1021:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP1020]] monotonic, align 8
3219 // CHECK-NEXT:    [[TMP1022:%.*]] = load i64, i64* [[LE]], align 8
3220 // CHECK-NEXT:    [[TMP1023:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP1022]] monotonic, align 8
3221 // CHECK-NEXT:    [[TMP1024:%.*]] = load i64, i64* [[LE]], align 8
3222 // CHECK-NEXT:    [[TMP1025:%.*]] = load i64, i64* [[LD]], align 8
3223 // CHECK-NEXT:    [[TMP1026:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP1024]], i64 [[TMP1025]] monotonic monotonic, align 8
3224 // CHECK-NEXT:    [[TMP1027:%.*]] = load i64, i64* [[LE]], align 8
3225 // CHECK-NEXT:    [[TMP1028:%.*]] = load i64, i64* [[LD]], align 8
3226 // CHECK-NEXT:    [[TMP1029:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP1027]], i64 [[TMP1028]] monotonic monotonic, align 8
3227 // CHECK-NEXT:    [[TMP1030:%.*]] = load i64, i64* [[LE]], align 8
3228 // CHECK-NEXT:    [[TMP1031:%.*]] = load i64, i64* [[LD]], align 8
3229 // CHECK-NEXT:    [[TMP1032:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP1030]], i64 [[TMP1031]] monotonic monotonic, align 8
3230 // CHECK-NEXT:    [[TMP1033:%.*]] = load i64, i64* [[LE]], align 8
3231 // CHECK-NEXT:    [[TMP1034:%.*]] = load i64, i64* [[LD]], align 8
3232 // CHECK-NEXT:    [[TMP1035:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP1033]], i64 [[TMP1034]] monotonic monotonic, align 8
3233 // CHECK-NEXT:    [[TMP1036:%.*]] = load i64, i64* [[ULE]], align 8
3234 // CHECK-NEXT:    [[TMP1037:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP1036]] monotonic, align 8
3235 // CHECK-NEXT:    [[TMP1038:%.*]] = load i64, i64* [[ULE]], align 8
3236 // CHECK-NEXT:    [[TMP1039:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP1038]] monotonic, align 8
3237 // CHECK-NEXT:    [[TMP1040:%.*]] = load i64, i64* [[ULE]], align 8
3238 // CHECK-NEXT:    [[TMP1041:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP1040]] monotonic, align 8
3239 // CHECK-NEXT:    [[TMP1042:%.*]] = load i64, i64* [[ULE]], align 8
3240 // CHECK-NEXT:    [[TMP1043:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP1042]] monotonic, align 8
3241 // CHECK-NEXT:    [[TMP1044:%.*]] = load i64, i64* [[ULE]], align 8
3242 // CHECK-NEXT:    [[TMP1045:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP1044]] monotonic, align 8
3243 // CHECK-NEXT:    [[TMP1046:%.*]] = load i64, i64* [[ULE]], align 8
3244 // CHECK-NEXT:    [[TMP1047:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP1046]] monotonic, align 8
3245 // CHECK-NEXT:    [[TMP1048:%.*]] = load i64, i64* [[ULE]], align 8
3246 // CHECK-NEXT:    [[TMP1049:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP1048]] monotonic, align 8
3247 // CHECK-NEXT:    [[TMP1050:%.*]] = load i64, i64* [[ULE]], align 8
3248 // CHECK-NEXT:    [[TMP1051:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP1050]] monotonic, align 8
3249 // CHECK-NEXT:    [[TMP1052:%.*]] = load i64, i64* [[ULE]], align 8
3250 // CHECK-NEXT:    [[TMP1053:%.*]] = load i64, i64* [[ULD]], align 8
3251 // CHECK-NEXT:    [[TMP1054:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP1052]], i64 [[TMP1053]] monotonic monotonic, align 8
3252 // CHECK-NEXT:    [[TMP1055:%.*]] = load i64, i64* [[ULE]], align 8
3253 // CHECK-NEXT:    [[TMP1056:%.*]] = load i64, i64* [[ULD]], align 8
3254 // CHECK-NEXT:    [[TMP1057:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP1055]], i64 [[TMP1056]] monotonic monotonic, align 8
3255 // CHECK-NEXT:    [[TMP1058:%.*]] = load i64, i64* [[ULE]], align 8
3256 // CHECK-NEXT:    [[TMP1059:%.*]] = load i64, i64* [[ULD]], align 8
3257 // CHECK-NEXT:    [[TMP1060:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP1058]], i64 [[TMP1059]] monotonic monotonic, align 8
3258 // CHECK-NEXT:    [[TMP1061:%.*]] = load i64, i64* [[ULE]], align 8
3259 // CHECK-NEXT:    [[TMP1062:%.*]] = load i64, i64* [[ULD]], align 8
3260 // CHECK-NEXT:    [[TMP1063:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP1061]], i64 [[TMP1062]] monotonic monotonic, align 8
3261 // CHECK-NEXT:    [[TMP1064:%.*]] = load i64, i64* [[LE]], align 8
3262 // CHECK-NEXT:    [[TMP1065:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP1064]] acq_rel, align 8
3263 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3264 // CHECK-NEXT:    [[TMP1066:%.*]] = load i64, i64* [[LE]], align 8
3265 // CHECK-NEXT:    [[TMP1067:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP1066]] acq_rel, align 8
3266 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3267 // CHECK-NEXT:    [[TMP1068:%.*]] = load i64, i64* [[LE]], align 8
3268 // CHECK-NEXT:    [[TMP1069:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP1068]] acq_rel, align 8
3269 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3270 // CHECK-NEXT:    [[TMP1070:%.*]] = load i64, i64* [[LE]], align 8
3271 // CHECK-NEXT:    [[TMP1071:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP1070]] acq_rel, align 8
3272 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3273 // CHECK-NEXT:    [[TMP1072:%.*]] = load i64, i64* [[LE]], align 8
3274 // CHECK-NEXT:    [[TMP1073:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP1072]] acq_rel, align 8
3275 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3276 // CHECK-NEXT:    [[TMP1074:%.*]] = load i64, i64* [[LE]], align 8
3277 // CHECK-NEXT:    [[TMP1075:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP1074]] acq_rel, align 8
3278 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3279 // CHECK-NEXT:    [[TMP1076:%.*]] = load i64, i64* [[LE]], align 8
3280 // CHECK-NEXT:    [[TMP1077:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP1076]] acq_rel, align 8
3281 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3282 // CHECK-NEXT:    [[TMP1078:%.*]] = load i64, i64* [[LE]], align 8
3283 // CHECK-NEXT:    [[TMP1079:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP1078]] acq_rel, align 8
3284 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3285 // CHECK-NEXT:    [[TMP1080:%.*]] = load i64, i64* [[LE]], align 8
3286 // CHECK-NEXT:    [[TMP1081:%.*]] = load i64, i64* [[LD]], align 8
3287 // CHECK-NEXT:    [[TMP1082:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP1080]], i64 [[TMP1081]] acq_rel acquire, align 8
3288 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3289 // CHECK-NEXT:    [[TMP1083:%.*]] = load i64, i64* [[LE]], align 8
3290 // CHECK-NEXT:    [[TMP1084:%.*]] = load i64, i64* [[LD]], align 8
3291 // CHECK-NEXT:    [[TMP1085:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP1083]], i64 [[TMP1084]] acq_rel acquire, align 8
3292 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3293 // CHECK-NEXT:    [[TMP1086:%.*]] = load i64, i64* [[LE]], align 8
3294 // CHECK-NEXT:    [[TMP1087:%.*]] = load i64, i64* [[LD]], align 8
3295 // CHECK-NEXT:    [[TMP1088:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP1086]], i64 [[TMP1087]] acq_rel acquire, align 8
3296 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3297 // CHECK-NEXT:    [[TMP1089:%.*]] = load i64, i64* [[LE]], align 8
3298 // CHECK-NEXT:    [[TMP1090:%.*]] = load i64, i64* [[LD]], align 8
3299 // CHECK-NEXT:    [[TMP1091:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP1089]], i64 [[TMP1090]] acq_rel acquire, align 8
3300 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3301 // CHECK-NEXT:    [[TMP1092:%.*]] = load i64, i64* [[ULE]], align 8
3302 // CHECK-NEXT:    [[TMP1093:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP1092]] acq_rel, align 8
3303 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3304 // CHECK-NEXT:    [[TMP1094:%.*]] = load i64, i64* [[ULE]], align 8
3305 // CHECK-NEXT:    [[TMP1095:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP1094]] acq_rel, align 8
3306 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3307 // CHECK-NEXT:    [[TMP1096:%.*]] = load i64, i64* [[ULE]], align 8
3308 // CHECK-NEXT:    [[TMP1097:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP1096]] acq_rel, align 8
3309 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3310 // CHECK-NEXT:    [[TMP1098:%.*]] = load i64, i64* [[ULE]], align 8
3311 // CHECK-NEXT:    [[TMP1099:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP1098]] acq_rel, align 8
3312 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3313 // CHECK-NEXT:    [[TMP1100:%.*]] = load i64, i64* [[ULE]], align 8
3314 // CHECK-NEXT:    [[TMP1101:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP1100]] acq_rel, align 8
3315 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3316 // CHECK-NEXT:    [[TMP1102:%.*]] = load i64, i64* [[ULE]], align 8
3317 // CHECK-NEXT:    [[TMP1103:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP1102]] acq_rel, align 8
3318 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3319 // CHECK-NEXT:    [[TMP1104:%.*]] = load i64, i64* [[ULE]], align 8
3320 // CHECK-NEXT:    [[TMP1105:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP1104]] acq_rel, align 8
3321 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3322 // CHECK-NEXT:    [[TMP1106:%.*]] = load i64, i64* [[ULE]], align 8
3323 // CHECK-NEXT:    [[TMP1107:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP1106]] acq_rel, align 8
3324 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3325 // CHECK-NEXT:    [[TMP1108:%.*]] = load i64, i64* [[ULE]], align 8
3326 // CHECK-NEXT:    [[TMP1109:%.*]] = load i64, i64* [[ULD]], align 8
3327 // CHECK-NEXT:    [[TMP1110:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP1108]], i64 [[TMP1109]] acq_rel acquire, align 8
3328 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3329 // CHECK-NEXT:    [[TMP1111:%.*]] = load i64, i64* [[ULE]], align 8
3330 // CHECK-NEXT:    [[TMP1112:%.*]] = load i64, i64* [[ULD]], align 8
3331 // CHECK-NEXT:    [[TMP1113:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP1111]], i64 [[TMP1112]] acq_rel acquire, align 8
3332 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3333 // CHECK-NEXT:    [[TMP1114:%.*]] = load i64, i64* [[ULE]], align 8
3334 // CHECK-NEXT:    [[TMP1115:%.*]] = load i64, i64* [[ULD]], align 8
3335 // CHECK-NEXT:    [[TMP1116:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP1114]], i64 [[TMP1115]] acq_rel acquire, align 8
3336 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3337 // CHECK-NEXT:    [[TMP1117:%.*]] = load i64, i64* [[ULE]], align 8
3338 // CHECK-NEXT:    [[TMP1118:%.*]] = load i64, i64* [[ULD]], align 8
3339 // CHECK-NEXT:    [[TMP1119:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP1117]], i64 [[TMP1118]] acq_rel acquire, align 8
3340 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3341 // CHECK-NEXT:    [[TMP1120:%.*]] = load i64, i64* [[LE]], align 8
3342 // CHECK-NEXT:    [[TMP1121:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP1120]] acquire, align 8
3343 // CHECK-NEXT:    [[TMP1122:%.*]] = load i64, i64* [[LE]], align 8
3344 // CHECK-NEXT:    [[TMP1123:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP1122]] acquire, align 8
3345 // CHECK-NEXT:    [[TMP1124:%.*]] = load i64, i64* [[LE]], align 8
3346 // CHECK-NEXT:    [[TMP1125:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP1124]] acquire, align 8
3347 // CHECK-NEXT:    [[TMP1126:%.*]] = load i64, i64* [[LE]], align 8
3348 // CHECK-NEXT:    [[TMP1127:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP1126]] acquire, align 8
3349 // CHECK-NEXT:    [[TMP1128:%.*]] = load i64, i64* [[LE]], align 8
3350 // CHECK-NEXT:    [[TMP1129:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP1128]] acquire, align 8
3351 // CHECK-NEXT:    [[TMP1130:%.*]] = load i64, i64* [[LE]], align 8
3352 // CHECK-NEXT:    [[TMP1131:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP1130]] acquire, align 8
3353 // CHECK-NEXT:    [[TMP1132:%.*]] = load i64, i64* [[LE]], align 8
3354 // CHECK-NEXT:    [[TMP1133:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP1132]] acquire, align 8
3355 // CHECK-NEXT:    [[TMP1134:%.*]] = load i64, i64* [[LE]], align 8
3356 // CHECK-NEXT:    [[TMP1135:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP1134]] acquire, align 8
3357 // CHECK-NEXT:    [[TMP1136:%.*]] = load i64, i64* [[LE]], align 8
3358 // CHECK-NEXT:    [[TMP1137:%.*]] = load i64, i64* [[LD]], align 8
3359 // CHECK-NEXT:    [[TMP1138:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP1136]], i64 [[TMP1137]] acquire acquire, align 8
3360 // CHECK-NEXT:    [[TMP1139:%.*]] = load i64, i64* [[LE]], align 8
3361 // CHECK-NEXT:    [[TMP1140:%.*]] = load i64, i64* [[LD]], align 8
3362 // CHECK-NEXT:    [[TMP1141:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP1139]], i64 [[TMP1140]] acquire acquire, align 8
3363 // CHECK-NEXT:    [[TMP1142:%.*]] = load i64, i64* [[LE]], align 8
3364 // CHECK-NEXT:    [[TMP1143:%.*]] = load i64, i64* [[LD]], align 8
3365 // CHECK-NEXT:    [[TMP1144:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP1142]], i64 [[TMP1143]] acquire acquire, align 8
3366 // CHECK-NEXT:    [[TMP1145:%.*]] = load i64, i64* [[LE]], align 8
3367 // CHECK-NEXT:    [[TMP1146:%.*]] = load i64, i64* [[LD]], align 8
3368 // CHECK-NEXT:    [[TMP1147:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP1145]], i64 [[TMP1146]] acquire acquire, align 8
3369 // CHECK-NEXT:    [[TMP1148:%.*]] = load i64, i64* [[ULE]], align 8
3370 // CHECK-NEXT:    [[TMP1149:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP1148]] acquire, align 8
3371 // CHECK-NEXT:    [[TMP1150:%.*]] = load i64, i64* [[ULE]], align 8
3372 // CHECK-NEXT:    [[TMP1151:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP1150]] acquire, align 8
3373 // CHECK-NEXT:    [[TMP1152:%.*]] = load i64, i64* [[ULE]], align 8
3374 // CHECK-NEXT:    [[TMP1153:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP1152]] acquire, align 8
3375 // CHECK-NEXT:    [[TMP1154:%.*]] = load i64, i64* [[ULE]], align 8
3376 // CHECK-NEXT:    [[TMP1155:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP1154]] acquire, align 8
3377 // CHECK-NEXT:    [[TMP1156:%.*]] = load i64, i64* [[ULE]], align 8
3378 // CHECK-NEXT:    [[TMP1157:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP1156]] acquire, align 8
3379 // CHECK-NEXT:    [[TMP1158:%.*]] = load i64, i64* [[ULE]], align 8
3380 // CHECK-NEXT:    [[TMP1159:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP1158]] acquire, align 8
3381 // CHECK-NEXT:    [[TMP1160:%.*]] = load i64, i64* [[ULE]], align 8
3382 // CHECK-NEXT:    [[TMP1161:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP1160]] acquire, align 8
3383 // CHECK-NEXT:    [[TMP1162:%.*]] = load i64, i64* [[ULE]], align 8
3384 // CHECK-NEXT:    [[TMP1163:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP1162]] acquire, align 8
3385 // CHECK-NEXT:    [[TMP1164:%.*]] = load i64, i64* [[ULE]], align 8
3386 // CHECK-NEXT:    [[TMP1165:%.*]] = load i64, i64* [[ULD]], align 8
3387 // CHECK-NEXT:    [[TMP1166:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP1164]], i64 [[TMP1165]] acquire acquire, align 8
3388 // CHECK-NEXT:    [[TMP1167:%.*]] = load i64, i64* [[ULE]], align 8
3389 // CHECK-NEXT:    [[TMP1168:%.*]] = load i64, i64* [[ULD]], align 8
3390 // CHECK-NEXT:    [[TMP1169:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP1167]], i64 [[TMP1168]] acquire acquire, align 8
3391 // CHECK-NEXT:    [[TMP1170:%.*]] = load i64, i64* [[ULE]], align 8
3392 // CHECK-NEXT:    [[TMP1171:%.*]] = load i64, i64* [[ULD]], align 8
3393 // CHECK-NEXT:    [[TMP1172:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP1170]], i64 [[TMP1171]] acquire acquire, align 8
3394 // CHECK-NEXT:    [[TMP1173:%.*]] = load i64, i64* [[ULE]], align 8
3395 // CHECK-NEXT:    [[TMP1174:%.*]] = load i64, i64* [[ULD]], align 8
3396 // CHECK-NEXT:    [[TMP1175:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP1173]], i64 [[TMP1174]] acquire acquire, align 8
3397 // CHECK-NEXT:    [[TMP1176:%.*]] = load i64, i64* [[LE]], align 8
3398 // CHECK-NEXT:    [[TMP1177:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP1176]] monotonic, align 8
3399 // CHECK-NEXT:    [[TMP1178:%.*]] = load i64, i64* [[LE]], align 8
3400 // CHECK-NEXT:    [[TMP1179:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP1178]] monotonic, align 8
3401 // CHECK-NEXT:    [[TMP1180:%.*]] = load i64, i64* [[LE]], align 8
3402 // CHECK-NEXT:    [[TMP1181:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP1180]] monotonic, align 8
3403 // CHECK-NEXT:    [[TMP1182:%.*]] = load i64, i64* [[LE]], align 8
3404 // CHECK-NEXT:    [[TMP1183:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP1182]] monotonic, align 8
3405 // CHECK-NEXT:    [[TMP1184:%.*]] = load i64, i64* [[LE]], align 8
3406 // CHECK-NEXT:    [[TMP1185:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP1184]] monotonic, align 8
3407 // CHECK-NEXT:    [[TMP1186:%.*]] = load i64, i64* [[LE]], align 8
3408 // CHECK-NEXT:    [[TMP1187:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP1186]] monotonic, align 8
3409 // CHECK-NEXT:    [[TMP1188:%.*]] = load i64, i64* [[LE]], align 8
3410 // CHECK-NEXT:    [[TMP1189:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP1188]] monotonic, align 8
3411 // CHECK-NEXT:    [[TMP1190:%.*]] = load i64, i64* [[LE]], align 8
3412 // CHECK-NEXT:    [[TMP1191:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP1190]] monotonic, align 8
3413 // CHECK-NEXT:    [[TMP1192:%.*]] = load i64, i64* [[LE]], align 8
3414 // CHECK-NEXT:    [[TMP1193:%.*]] = load i64, i64* [[LD]], align 8
3415 // CHECK-NEXT:    [[TMP1194:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP1192]], i64 [[TMP1193]] monotonic monotonic, align 8
3416 // CHECK-NEXT:    [[TMP1195:%.*]] = load i64, i64* [[LE]], align 8
3417 // CHECK-NEXT:    [[TMP1196:%.*]] = load i64, i64* [[LD]], align 8
3418 // CHECK-NEXT:    [[TMP1197:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP1195]], i64 [[TMP1196]] monotonic monotonic, align 8
3419 // CHECK-NEXT:    [[TMP1198:%.*]] = load i64, i64* [[LE]], align 8
3420 // CHECK-NEXT:    [[TMP1199:%.*]] = load i64, i64* [[LD]], align 8
3421 // CHECK-NEXT:    [[TMP1200:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP1198]], i64 [[TMP1199]] monotonic monotonic, align 8
3422 // CHECK-NEXT:    [[TMP1201:%.*]] = load i64, i64* [[LE]], align 8
3423 // CHECK-NEXT:    [[TMP1202:%.*]] = load i64, i64* [[LD]], align 8
3424 // CHECK-NEXT:    [[TMP1203:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP1201]], i64 [[TMP1202]] monotonic monotonic, align 8
3425 // CHECK-NEXT:    [[TMP1204:%.*]] = load i64, i64* [[ULE]], align 8
3426 // CHECK-NEXT:    [[TMP1205:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP1204]] monotonic, align 8
3427 // CHECK-NEXT:    [[TMP1206:%.*]] = load i64, i64* [[ULE]], align 8
3428 // CHECK-NEXT:    [[TMP1207:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP1206]] monotonic, align 8
3429 // CHECK-NEXT:    [[TMP1208:%.*]] = load i64, i64* [[ULE]], align 8
3430 // CHECK-NEXT:    [[TMP1209:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP1208]] monotonic, align 8
3431 // CHECK-NEXT:    [[TMP1210:%.*]] = load i64, i64* [[ULE]], align 8
3432 // CHECK-NEXT:    [[TMP1211:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP1210]] monotonic, align 8
3433 // CHECK-NEXT:    [[TMP1212:%.*]] = load i64, i64* [[ULE]], align 8
3434 // CHECK-NEXT:    [[TMP1213:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP1212]] monotonic, align 8
3435 // CHECK-NEXT:    [[TMP1214:%.*]] = load i64, i64* [[ULE]], align 8
3436 // CHECK-NEXT:    [[TMP1215:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP1214]] monotonic, align 8
3437 // CHECK-NEXT:    [[TMP1216:%.*]] = load i64, i64* [[ULE]], align 8
3438 // CHECK-NEXT:    [[TMP1217:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP1216]] monotonic, align 8
3439 // CHECK-NEXT:    [[TMP1218:%.*]] = load i64, i64* [[ULE]], align 8
3440 // CHECK-NEXT:    [[TMP1219:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP1218]] monotonic, align 8
3441 // CHECK-NEXT:    [[TMP1220:%.*]] = load i64, i64* [[ULE]], align 8
3442 // CHECK-NEXT:    [[TMP1221:%.*]] = load i64, i64* [[ULD]], align 8
3443 // CHECK-NEXT:    [[TMP1222:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP1220]], i64 [[TMP1221]] monotonic monotonic, align 8
3444 // CHECK-NEXT:    [[TMP1223:%.*]] = load i64, i64* [[ULE]], align 8
3445 // CHECK-NEXT:    [[TMP1224:%.*]] = load i64, i64* [[ULD]], align 8
3446 // CHECK-NEXT:    [[TMP1225:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP1223]], i64 [[TMP1224]] monotonic monotonic, align 8
3447 // CHECK-NEXT:    [[TMP1226:%.*]] = load i64, i64* [[ULE]], align 8
3448 // CHECK-NEXT:    [[TMP1227:%.*]] = load i64, i64* [[ULD]], align 8
3449 // CHECK-NEXT:    [[TMP1228:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP1226]], i64 [[TMP1227]] monotonic monotonic, align 8
3450 // CHECK-NEXT:    [[TMP1229:%.*]] = load i64, i64* [[ULE]], align 8
3451 // CHECK-NEXT:    [[TMP1230:%.*]] = load i64, i64* [[ULD]], align 8
3452 // CHECK-NEXT:    [[TMP1231:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP1229]], i64 [[TMP1230]] monotonic monotonic, align 8
3453 // CHECK-NEXT:    [[TMP1232:%.*]] = load i64, i64* [[LE]], align 8
3454 // CHECK-NEXT:    [[TMP1233:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP1232]] release, align 8
3455 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3456 // CHECK-NEXT:    [[TMP1234:%.*]] = load i64, i64* [[LE]], align 8
3457 // CHECK-NEXT:    [[TMP1235:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP1234]] release, align 8
3458 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3459 // CHECK-NEXT:    [[TMP1236:%.*]] = load i64, i64* [[LE]], align 8
3460 // CHECK-NEXT:    [[TMP1237:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP1236]] release, align 8
3461 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3462 // CHECK-NEXT:    [[TMP1238:%.*]] = load i64, i64* [[LE]], align 8
3463 // CHECK-NEXT:    [[TMP1239:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP1238]] release, align 8
3464 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3465 // CHECK-NEXT:    [[TMP1240:%.*]] = load i64, i64* [[LE]], align 8
3466 // CHECK-NEXT:    [[TMP1241:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP1240]] release, align 8
3467 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3468 // CHECK-NEXT:    [[TMP1242:%.*]] = load i64, i64* [[LE]], align 8
3469 // CHECK-NEXT:    [[TMP1243:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP1242]] release, align 8
3470 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3471 // CHECK-NEXT:    [[TMP1244:%.*]] = load i64, i64* [[LE]], align 8
3472 // CHECK-NEXT:    [[TMP1245:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP1244]] release, align 8
3473 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3474 // CHECK-NEXT:    [[TMP1246:%.*]] = load i64, i64* [[LE]], align 8
3475 // CHECK-NEXT:    [[TMP1247:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP1246]] release, align 8
3476 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3477 // CHECK-NEXT:    [[TMP1248:%.*]] = load i64, i64* [[LE]], align 8
3478 // CHECK-NEXT:    [[TMP1249:%.*]] = load i64, i64* [[LD]], align 8
3479 // CHECK-NEXT:    [[TMP1250:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP1248]], i64 [[TMP1249]] release monotonic, align 8
3480 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3481 // CHECK-NEXT:    [[TMP1251:%.*]] = load i64, i64* [[LE]], align 8
3482 // CHECK-NEXT:    [[TMP1252:%.*]] = load i64, i64* [[LD]], align 8
3483 // CHECK-NEXT:    [[TMP1253:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP1251]], i64 [[TMP1252]] release monotonic, align 8
3484 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3485 // CHECK-NEXT:    [[TMP1254:%.*]] = load i64, i64* [[LE]], align 8
3486 // CHECK-NEXT:    [[TMP1255:%.*]] = load i64, i64* [[LD]], align 8
3487 // CHECK-NEXT:    [[TMP1256:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP1254]], i64 [[TMP1255]] release monotonic, align 8
3488 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3489 // CHECK-NEXT:    [[TMP1257:%.*]] = load i64, i64* [[LE]], align 8
3490 // CHECK-NEXT:    [[TMP1258:%.*]] = load i64, i64* [[LD]], align 8
3491 // CHECK-NEXT:    [[TMP1259:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP1257]], i64 [[TMP1258]] release monotonic, align 8
3492 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3493 // CHECK-NEXT:    [[TMP1260:%.*]] = load i64, i64* [[ULE]], align 8
3494 // CHECK-NEXT:    [[TMP1261:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP1260]] release, align 8
3495 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3496 // CHECK-NEXT:    [[TMP1262:%.*]] = load i64, i64* [[ULE]], align 8
3497 // CHECK-NEXT:    [[TMP1263:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP1262]] release, align 8
3498 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3499 // CHECK-NEXT:    [[TMP1264:%.*]] = load i64, i64* [[ULE]], align 8
3500 // CHECK-NEXT:    [[TMP1265:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP1264]] release, align 8
3501 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3502 // CHECK-NEXT:    [[TMP1266:%.*]] = load i64, i64* [[ULE]], align 8
3503 // CHECK-NEXT:    [[TMP1267:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP1266]] release, align 8
3504 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3505 // CHECK-NEXT:    [[TMP1268:%.*]] = load i64, i64* [[ULE]], align 8
3506 // CHECK-NEXT:    [[TMP1269:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP1268]] release, align 8
3507 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3508 // CHECK-NEXT:    [[TMP1270:%.*]] = load i64, i64* [[ULE]], align 8
3509 // CHECK-NEXT:    [[TMP1271:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP1270]] release, align 8
3510 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3511 // CHECK-NEXT:    [[TMP1272:%.*]] = load i64, i64* [[ULE]], align 8
3512 // CHECK-NEXT:    [[TMP1273:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP1272]] release, align 8
3513 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3514 // CHECK-NEXT:    [[TMP1274:%.*]] = load i64, i64* [[ULE]], align 8
3515 // CHECK-NEXT:    [[TMP1275:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP1274]] release, align 8
3516 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3517 // CHECK-NEXT:    [[TMP1276:%.*]] = load i64, i64* [[ULE]], align 8
3518 // CHECK-NEXT:    [[TMP1277:%.*]] = load i64, i64* [[ULD]], align 8
3519 // CHECK-NEXT:    [[TMP1278:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP1276]], i64 [[TMP1277]] release monotonic, align 8
3520 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3521 // CHECK-NEXT:    [[TMP1279:%.*]] = load i64, i64* [[ULE]], align 8
3522 // CHECK-NEXT:    [[TMP1280:%.*]] = load i64, i64* [[ULD]], align 8
3523 // CHECK-NEXT:    [[TMP1281:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP1279]], i64 [[TMP1280]] release monotonic, align 8
3524 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3525 // CHECK-NEXT:    [[TMP1282:%.*]] = load i64, i64* [[ULE]], align 8
3526 // CHECK-NEXT:    [[TMP1283:%.*]] = load i64, i64* [[ULD]], align 8
3527 // CHECK-NEXT:    [[TMP1284:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP1282]], i64 [[TMP1283]] release monotonic, align 8
3528 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3529 // CHECK-NEXT:    [[TMP1285:%.*]] = load i64, i64* [[ULE]], align 8
3530 // CHECK-NEXT:    [[TMP1286:%.*]] = load i64, i64* [[ULD]], align 8
3531 // CHECK-NEXT:    [[TMP1287:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP1285]], i64 [[TMP1286]] release monotonic, align 8
3532 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3533 // CHECK-NEXT:    [[TMP1288:%.*]] = load i64, i64* [[LE]], align 8
3534 // CHECK-NEXT:    [[TMP1289:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP1288]] seq_cst, align 8
3535 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3536 // CHECK-NEXT:    [[TMP1290:%.*]] = load i64, i64* [[LE]], align 8
3537 // CHECK-NEXT:    [[TMP1291:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP1290]] seq_cst, align 8
3538 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3539 // CHECK-NEXT:    [[TMP1292:%.*]] = load i64, i64* [[LE]], align 8
3540 // CHECK-NEXT:    [[TMP1293:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP1292]] seq_cst, align 8
3541 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3542 // CHECK-NEXT:    [[TMP1294:%.*]] = load i64, i64* [[LE]], align 8
3543 // CHECK-NEXT:    [[TMP1295:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP1294]] seq_cst, align 8
3544 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3545 // CHECK-NEXT:    [[TMP1296:%.*]] = load i64, i64* [[LE]], align 8
3546 // CHECK-NEXT:    [[TMP1297:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP1296]] seq_cst, align 8
3547 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3548 // CHECK-NEXT:    [[TMP1298:%.*]] = load i64, i64* [[LE]], align 8
3549 // CHECK-NEXT:    [[TMP1299:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP1298]] seq_cst, align 8
3550 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3551 // CHECK-NEXT:    [[TMP1300:%.*]] = load i64, i64* [[LE]], align 8
3552 // CHECK-NEXT:    [[TMP1301:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP1300]] seq_cst, align 8
3553 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3554 // CHECK-NEXT:    [[TMP1302:%.*]] = load i64, i64* [[LE]], align 8
3555 // CHECK-NEXT:    [[TMP1303:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP1302]] seq_cst, align 8
3556 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3557 // CHECK-NEXT:    [[TMP1304:%.*]] = load i64, i64* [[LE]], align 8
3558 // CHECK-NEXT:    [[TMP1305:%.*]] = load i64, i64* [[LD]], align 8
3559 // CHECK-NEXT:    [[TMP1306:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP1304]], i64 [[TMP1305]] seq_cst seq_cst, align 8
3560 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3561 // CHECK-NEXT:    [[TMP1307:%.*]] = load i64, i64* [[LE]], align 8
3562 // CHECK-NEXT:    [[TMP1308:%.*]] = load i64, i64* [[LD]], align 8
3563 // CHECK-NEXT:    [[TMP1309:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP1307]], i64 [[TMP1308]] seq_cst seq_cst, align 8
3564 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3565 // CHECK-NEXT:    [[TMP1310:%.*]] = load i64, i64* [[LE]], align 8
3566 // CHECK-NEXT:    [[TMP1311:%.*]] = load i64, i64* [[LD]], align 8
3567 // CHECK-NEXT:    [[TMP1312:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP1310]], i64 [[TMP1311]] seq_cst seq_cst, align 8
3568 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3569 // CHECK-NEXT:    [[TMP1313:%.*]] = load i64, i64* [[LE]], align 8
3570 // CHECK-NEXT:    [[TMP1314:%.*]] = load i64, i64* [[LD]], align 8
3571 // CHECK-NEXT:    [[TMP1315:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP1313]], i64 [[TMP1314]] seq_cst seq_cst, align 8
3572 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3573 // CHECK-NEXT:    [[TMP1316:%.*]] = load i64, i64* [[ULE]], align 8
3574 // CHECK-NEXT:    [[TMP1317:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP1316]] seq_cst, align 8
3575 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3576 // CHECK-NEXT:    [[TMP1318:%.*]] = load i64, i64* [[ULE]], align 8
3577 // CHECK-NEXT:    [[TMP1319:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP1318]] seq_cst, align 8
3578 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3579 // CHECK-NEXT:    [[TMP1320:%.*]] = load i64, i64* [[ULE]], align 8
3580 // CHECK-NEXT:    [[TMP1321:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP1320]] seq_cst, align 8
3581 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3582 // CHECK-NEXT:    [[TMP1322:%.*]] = load i64, i64* [[ULE]], align 8
3583 // CHECK-NEXT:    [[TMP1323:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP1322]] seq_cst, align 8
3584 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3585 // CHECK-NEXT:    [[TMP1324:%.*]] = load i64, i64* [[ULE]], align 8
3586 // CHECK-NEXT:    [[TMP1325:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP1324]] seq_cst, align 8
3587 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3588 // CHECK-NEXT:    [[TMP1326:%.*]] = load i64, i64* [[ULE]], align 8
3589 // CHECK-NEXT:    [[TMP1327:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP1326]] seq_cst, align 8
3590 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3591 // CHECK-NEXT:    [[TMP1328:%.*]] = load i64, i64* [[ULE]], align 8
3592 // CHECK-NEXT:    [[TMP1329:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP1328]] seq_cst, align 8
3593 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3594 // CHECK-NEXT:    [[TMP1330:%.*]] = load i64, i64* [[ULE]], align 8
3595 // CHECK-NEXT:    [[TMP1331:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP1330]] seq_cst, align 8
3596 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3597 // CHECK-NEXT:    [[TMP1332:%.*]] = load i64, i64* [[ULE]], align 8
3598 // CHECK-NEXT:    [[TMP1333:%.*]] = load i64, i64* [[ULD]], align 8
3599 // CHECK-NEXT:    [[TMP1334:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP1332]], i64 [[TMP1333]] seq_cst seq_cst, align 8
3600 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3601 // CHECK-NEXT:    [[TMP1335:%.*]] = load i64, i64* [[ULE]], align 8
3602 // CHECK-NEXT:    [[TMP1336:%.*]] = load i64, i64* [[ULD]], align 8
3603 // CHECK-NEXT:    [[TMP1337:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP1335]], i64 [[TMP1336]] seq_cst seq_cst, align 8
3604 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3605 // CHECK-NEXT:    [[TMP1338:%.*]] = load i64, i64* [[ULE]], align 8
3606 // CHECK-NEXT:    [[TMP1339:%.*]] = load i64, i64* [[ULD]], align 8
3607 // CHECK-NEXT:    [[TMP1340:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP1338]], i64 [[TMP1339]] seq_cst seq_cst, align 8
3608 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3609 // CHECK-NEXT:    [[TMP1341:%.*]] = load i64, i64* [[ULE]], align 8
3610 // CHECK-NEXT:    [[TMP1342:%.*]] = load i64, i64* [[ULD]], align 8
3611 // CHECK-NEXT:    [[TMP1343:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP1341]], i64 [[TMP1342]] seq_cst seq_cst, align 8
3612 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3613 // CHECK-NEXT:    [[TMP1344:%.*]] = load i64, i64* [[LLE]], align 8
3614 // CHECK-NEXT:    [[TMP1345:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP1344]] monotonic, align 8
3615 // CHECK-NEXT:    [[TMP1346:%.*]] = load i64, i64* [[LLE]], align 8
3616 // CHECK-NEXT:    [[TMP1347:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP1346]] monotonic, align 8
3617 // CHECK-NEXT:    [[TMP1348:%.*]] = load i64, i64* [[LLE]], align 8
3618 // CHECK-NEXT:    [[TMP1349:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP1348]] monotonic, align 8
3619 // CHECK-NEXT:    [[TMP1350:%.*]] = load i64, i64* [[LLE]], align 8
3620 // CHECK-NEXT:    [[TMP1351:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP1350]] monotonic, align 8
3621 // CHECK-NEXT:    [[TMP1352:%.*]] = load i64, i64* [[LLE]], align 8
3622 // CHECK-NEXT:    [[TMP1353:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP1352]] monotonic, align 8
3623 // CHECK-NEXT:    [[TMP1354:%.*]] = load i64, i64* [[LLE]], align 8
3624 // CHECK-NEXT:    [[TMP1355:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP1354]] monotonic, align 8
3625 // CHECK-NEXT:    [[TMP1356:%.*]] = load i64, i64* [[LLE]], align 8
3626 // CHECK-NEXT:    [[TMP1357:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP1356]] monotonic, align 8
3627 // CHECK-NEXT:    [[TMP1358:%.*]] = load i64, i64* [[LLE]], align 8
3628 // CHECK-NEXT:    [[TMP1359:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP1358]] monotonic, align 8
3629 // CHECK-NEXT:    [[TMP1360:%.*]] = load i64, i64* [[LLE]], align 8
3630 // CHECK-NEXT:    [[TMP1361:%.*]] = load i64, i64* [[LLD]], align 8
3631 // CHECK-NEXT:    [[TMP1362:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP1360]], i64 [[TMP1361]] monotonic monotonic, align 8
3632 // CHECK-NEXT:    [[TMP1363:%.*]] = load i64, i64* [[LLE]], align 8
3633 // CHECK-NEXT:    [[TMP1364:%.*]] = load i64, i64* [[LLD]], align 8
3634 // CHECK-NEXT:    [[TMP1365:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP1363]], i64 [[TMP1364]] monotonic monotonic, align 8
3635 // CHECK-NEXT:    [[TMP1366:%.*]] = load i64, i64* [[LLE]], align 8
3636 // CHECK-NEXT:    [[TMP1367:%.*]] = load i64, i64* [[LLD]], align 8
3637 // CHECK-NEXT:    [[TMP1368:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP1366]], i64 [[TMP1367]] monotonic monotonic, align 8
3638 // CHECK-NEXT:    [[TMP1369:%.*]] = load i64, i64* [[LLE]], align 8
3639 // CHECK-NEXT:    [[TMP1370:%.*]] = load i64, i64* [[LLD]], align 8
3640 // CHECK-NEXT:    [[TMP1371:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP1369]], i64 [[TMP1370]] monotonic monotonic, align 8
3641 // CHECK-NEXT:    [[TMP1372:%.*]] = load i64, i64* [[ULLE]], align 8
3642 // CHECK-NEXT:    [[TMP1373:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP1372]] monotonic, align 8
3643 // CHECK-NEXT:    [[TMP1374:%.*]] = load i64, i64* [[ULLE]], align 8
3644 // CHECK-NEXT:    [[TMP1375:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP1374]] monotonic, align 8
3645 // CHECK-NEXT:    [[TMP1376:%.*]] = load i64, i64* [[ULLE]], align 8
3646 // CHECK-NEXT:    [[TMP1377:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP1376]] monotonic, align 8
3647 // CHECK-NEXT:    [[TMP1378:%.*]] = load i64, i64* [[ULLE]], align 8
3648 // CHECK-NEXT:    [[TMP1379:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP1378]] monotonic, align 8
3649 // CHECK-NEXT:    [[TMP1380:%.*]] = load i64, i64* [[ULLE]], align 8
3650 // CHECK-NEXT:    [[TMP1381:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP1380]] monotonic, align 8
3651 // CHECK-NEXT:    [[TMP1382:%.*]] = load i64, i64* [[ULLE]], align 8
3652 // CHECK-NEXT:    [[TMP1383:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP1382]] monotonic, align 8
3653 // CHECK-NEXT:    [[TMP1384:%.*]] = load i64, i64* [[ULLE]], align 8
3654 // CHECK-NEXT:    [[TMP1385:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP1384]] monotonic, align 8
3655 // CHECK-NEXT:    [[TMP1386:%.*]] = load i64, i64* [[ULLE]], align 8
3656 // CHECK-NEXT:    [[TMP1387:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP1386]] monotonic, align 8
3657 // CHECK-NEXT:    [[TMP1388:%.*]] = load i64, i64* [[ULLE]], align 8
3658 // CHECK-NEXT:    [[TMP1389:%.*]] = load i64, i64* [[ULLD]], align 8
3659 // CHECK-NEXT:    [[TMP1390:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP1388]], i64 [[TMP1389]] monotonic monotonic, align 8
3660 // CHECK-NEXT:    [[TMP1391:%.*]] = load i64, i64* [[ULLE]], align 8
3661 // CHECK-NEXT:    [[TMP1392:%.*]] = load i64, i64* [[ULLD]], align 8
3662 // CHECK-NEXT:    [[TMP1393:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP1391]], i64 [[TMP1392]] monotonic monotonic, align 8
3663 // CHECK-NEXT:    [[TMP1394:%.*]] = load i64, i64* [[ULLE]], align 8
3664 // CHECK-NEXT:    [[TMP1395:%.*]] = load i64, i64* [[ULLD]], align 8
3665 // CHECK-NEXT:    [[TMP1396:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP1394]], i64 [[TMP1395]] monotonic monotonic, align 8
3666 // CHECK-NEXT:    [[TMP1397:%.*]] = load i64, i64* [[ULLE]], align 8
3667 // CHECK-NEXT:    [[TMP1398:%.*]] = load i64, i64* [[ULLD]], align 8
3668 // CHECK-NEXT:    [[TMP1399:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP1397]], i64 [[TMP1398]] monotonic monotonic, align 8
3669 // CHECK-NEXT:    [[TMP1400:%.*]] = load i64, i64* [[LLE]], align 8
3670 // CHECK-NEXT:    [[TMP1401:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP1400]] acq_rel, align 8
3671 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3672 // CHECK-NEXT:    [[TMP1402:%.*]] = load i64, i64* [[LLE]], align 8
3673 // CHECK-NEXT:    [[TMP1403:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP1402]] acq_rel, align 8
3674 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3675 // CHECK-NEXT:    [[TMP1404:%.*]] = load i64, i64* [[LLE]], align 8
3676 // CHECK-NEXT:    [[TMP1405:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP1404]] acq_rel, align 8
3677 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3678 // CHECK-NEXT:    [[TMP1406:%.*]] = load i64, i64* [[LLE]], align 8
3679 // CHECK-NEXT:    [[TMP1407:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP1406]] acq_rel, align 8
3680 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3681 // CHECK-NEXT:    [[TMP1408:%.*]] = load i64, i64* [[LLE]], align 8
3682 // CHECK-NEXT:    [[TMP1409:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP1408]] acq_rel, align 8
3683 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3684 // CHECK-NEXT:    [[TMP1410:%.*]] = load i64, i64* [[LLE]], align 8
3685 // CHECK-NEXT:    [[TMP1411:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP1410]] acq_rel, align 8
3686 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3687 // CHECK-NEXT:    [[TMP1412:%.*]] = load i64, i64* [[LLE]], align 8
3688 // CHECK-NEXT:    [[TMP1413:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP1412]] acq_rel, align 8
3689 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3690 // CHECK-NEXT:    [[TMP1414:%.*]] = load i64, i64* [[LLE]], align 8
3691 // CHECK-NEXT:    [[TMP1415:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP1414]] acq_rel, align 8
3692 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3693 // CHECK-NEXT:    [[TMP1416:%.*]] = load i64, i64* [[LLE]], align 8
3694 // CHECK-NEXT:    [[TMP1417:%.*]] = load i64, i64* [[LLD]], align 8
3695 // CHECK-NEXT:    [[TMP1418:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP1416]], i64 [[TMP1417]] acq_rel acquire, align 8
3696 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3697 // CHECK-NEXT:    [[TMP1419:%.*]] = load i64, i64* [[LLE]], align 8
3698 // CHECK-NEXT:    [[TMP1420:%.*]] = load i64, i64* [[LLD]], align 8
3699 // CHECK-NEXT:    [[TMP1421:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP1419]], i64 [[TMP1420]] acq_rel acquire, align 8
3700 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3701 // CHECK-NEXT:    [[TMP1422:%.*]] = load i64, i64* [[LLE]], align 8
3702 // CHECK-NEXT:    [[TMP1423:%.*]] = load i64, i64* [[LLD]], align 8
3703 // CHECK-NEXT:    [[TMP1424:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP1422]], i64 [[TMP1423]] acq_rel acquire, align 8
3704 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3705 // CHECK-NEXT:    [[TMP1425:%.*]] = load i64, i64* [[LLE]], align 8
3706 // CHECK-NEXT:    [[TMP1426:%.*]] = load i64, i64* [[LLD]], align 8
3707 // CHECK-NEXT:    [[TMP1427:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP1425]], i64 [[TMP1426]] acq_rel acquire, align 8
3708 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3709 // CHECK-NEXT:    [[TMP1428:%.*]] = load i64, i64* [[ULLE]], align 8
3710 // CHECK-NEXT:    [[TMP1429:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP1428]] acq_rel, align 8
3711 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3712 // CHECK-NEXT:    [[TMP1430:%.*]] = load i64, i64* [[ULLE]], align 8
3713 // CHECK-NEXT:    [[TMP1431:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP1430]] acq_rel, align 8
3714 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3715 // CHECK-NEXT:    [[TMP1432:%.*]] = load i64, i64* [[ULLE]], align 8
3716 // CHECK-NEXT:    [[TMP1433:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP1432]] acq_rel, align 8
3717 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3718 // CHECK-NEXT:    [[TMP1434:%.*]] = load i64, i64* [[ULLE]], align 8
3719 // CHECK-NEXT:    [[TMP1435:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP1434]] acq_rel, align 8
3720 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3721 // CHECK-NEXT:    [[TMP1436:%.*]] = load i64, i64* [[ULLE]], align 8
3722 // CHECK-NEXT:    [[TMP1437:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP1436]] acq_rel, align 8
3723 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3724 // CHECK-NEXT:    [[TMP1438:%.*]] = load i64, i64* [[ULLE]], align 8
3725 // CHECK-NEXT:    [[TMP1439:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP1438]] acq_rel, align 8
3726 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3727 // CHECK-NEXT:    [[TMP1440:%.*]] = load i64, i64* [[ULLE]], align 8
3728 // CHECK-NEXT:    [[TMP1441:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP1440]] acq_rel, align 8
3729 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3730 // CHECK-NEXT:    [[TMP1442:%.*]] = load i64, i64* [[ULLE]], align 8
3731 // CHECK-NEXT:    [[TMP1443:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP1442]] acq_rel, align 8
3732 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3733 // CHECK-NEXT:    [[TMP1444:%.*]] = load i64, i64* [[ULLE]], align 8
3734 // CHECK-NEXT:    [[TMP1445:%.*]] = load i64, i64* [[ULLD]], align 8
3735 // CHECK-NEXT:    [[TMP1446:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP1444]], i64 [[TMP1445]] acq_rel acquire, align 8
3736 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3737 // CHECK-NEXT:    [[TMP1447:%.*]] = load i64, i64* [[ULLE]], align 8
3738 // CHECK-NEXT:    [[TMP1448:%.*]] = load i64, i64* [[ULLD]], align 8
3739 // CHECK-NEXT:    [[TMP1449:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP1447]], i64 [[TMP1448]] acq_rel acquire, align 8
3740 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3741 // CHECK-NEXT:    [[TMP1450:%.*]] = load i64, i64* [[ULLE]], align 8
3742 // CHECK-NEXT:    [[TMP1451:%.*]] = load i64, i64* [[ULLD]], align 8
3743 // CHECK-NEXT:    [[TMP1452:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP1450]], i64 [[TMP1451]] acq_rel acquire, align 8
3744 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3745 // CHECK-NEXT:    [[TMP1453:%.*]] = load i64, i64* [[ULLE]], align 8
3746 // CHECK-NEXT:    [[TMP1454:%.*]] = load i64, i64* [[ULLD]], align 8
3747 // CHECK-NEXT:    [[TMP1455:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP1453]], i64 [[TMP1454]] acq_rel acquire, align 8
3748 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3749 // CHECK-NEXT:    [[TMP1456:%.*]] = load i64, i64* [[LLE]], align 8
3750 // CHECK-NEXT:    [[TMP1457:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP1456]] acquire, align 8
3751 // CHECK-NEXT:    [[TMP1458:%.*]] = load i64, i64* [[LLE]], align 8
3752 // CHECK-NEXT:    [[TMP1459:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP1458]] acquire, align 8
3753 // CHECK-NEXT:    [[TMP1460:%.*]] = load i64, i64* [[LLE]], align 8
3754 // CHECK-NEXT:    [[TMP1461:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP1460]] acquire, align 8
3755 // CHECK-NEXT:    [[TMP1462:%.*]] = load i64, i64* [[LLE]], align 8
3756 // CHECK-NEXT:    [[TMP1463:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP1462]] acquire, align 8
3757 // CHECK-NEXT:    [[TMP1464:%.*]] = load i64, i64* [[LLE]], align 8
3758 // CHECK-NEXT:    [[TMP1465:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP1464]] acquire, align 8
3759 // CHECK-NEXT:    [[TMP1466:%.*]] = load i64, i64* [[LLE]], align 8
3760 // CHECK-NEXT:    [[TMP1467:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP1466]] acquire, align 8
3761 // CHECK-NEXT:    [[TMP1468:%.*]] = load i64, i64* [[LLE]], align 8
3762 // CHECK-NEXT:    [[TMP1469:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP1468]] acquire, align 8
3763 // CHECK-NEXT:    [[TMP1470:%.*]] = load i64, i64* [[LLE]], align 8
3764 // CHECK-NEXT:    [[TMP1471:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP1470]] acquire, align 8
3765 // CHECK-NEXT:    [[TMP1472:%.*]] = load i64, i64* [[LLE]], align 8
3766 // CHECK-NEXT:    [[TMP1473:%.*]] = load i64, i64* [[LLD]], align 8
3767 // CHECK-NEXT:    [[TMP1474:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP1472]], i64 [[TMP1473]] acquire acquire, align 8
3768 // CHECK-NEXT:    [[TMP1475:%.*]] = load i64, i64* [[LLE]], align 8
3769 // CHECK-NEXT:    [[TMP1476:%.*]] = load i64, i64* [[LLD]], align 8
3770 // CHECK-NEXT:    [[TMP1477:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP1475]], i64 [[TMP1476]] acquire acquire, align 8
3771 // CHECK-NEXT:    [[TMP1478:%.*]] = load i64, i64* [[LLE]], align 8
3772 // CHECK-NEXT:    [[TMP1479:%.*]] = load i64, i64* [[LLD]], align 8
3773 // CHECK-NEXT:    [[TMP1480:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP1478]], i64 [[TMP1479]] acquire acquire, align 8
3774 // CHECK-NEXT:    [[TMP1481:%.*]] = load i64, i64* [[LLE]], align 8
3775 // CHECK-NEXT:    [[TMP1482:%.*]] = load i64, i64* [[LLD]], align 8
3776 // CHECK-NEXT:    [[TMP1483:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP1481]], i64 [[TMP1482]] acquire acquire, align 8
3777 // CHECK-NEXT:    [[TMP1484:%.*]] = load i64, i64* [[ULLE]], align 8
3778 // CHECK-NEXT:    [[TMP1485:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP1484]] acquire, align 8
3779 // CHECK-NEXT:    [[TMP1486:%.*]] = load i64, i64* [[ULLE]], align 8
3780 // CHECK-NEXT:    [[TMP1487:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP1486]] acquire, align 8
3781 // CHECK-NEXT:    [[TMP1488:%.*]] = load i64, i64* [[ULLE]], align 8
3782 // CHECK-NEXT:    [[TMP1489:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP1488]] acquire, align 8
3783 // CHECK-NEXT:    [[TMP1490:%.*]] = load i64, i64* [[ULLE]], align 8
3784 // CHECK-NEXT:    [[TMP1491:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP1490]] acquire, align 8
3785 // CHECK-NEXT:    [[TMP1492:%.*]] = load i64, i64* [[ULLE]], align 8
3786 // CHECK-NEXT:    [[TMP1493:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP1492]] acquire, align 8
3787 // CHECK-NEXT:    [[TMP1494:%.*]] = load i64, i64* [[ULLE]], align 8
3788 // CHECK-NEXT:    [[TMP1495:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP1494]] acquire, align 8
3789 // CHECK-NEXT:    [[TMP1496:%.*]] = load i64, i64* [[ULLE]], align 8
3790 // CHECK-NEXT:    [[TMP1497:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP1496]] acquire, align 8
3791 // CHECK-NEXT:    [[TMP1498:%.*]] = load i64, i64* [[ULLE]], align 8
3792 // CHECK-NEXT:    [[TMP1499:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP1498]] acquire, align 8
3793 // CHECK-NEXT:    [[TMP1500:%.*]] = load i64, i64* [[ULLE]], align 8
3794 // CHECK-NEXT:    [[TMP1501:%.*]] = load i64, i64* [[ULLD]], align 8
3795 // CHECK-NEXT:    [[TMP1502:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP1500]], i64 [[TMP1501]] acquire acquire, align 8
3796 // CHECK-NEXT:    [[TMP1503:%.*]] = load i64, i64* [[ULLE]], align 8
3797 // CHECK-NEXT:    [[TMP1504:%.*]] = load i64, i64* [[ULLD]], align 8
3798 // CHECK-NEXT:    [[TMP1505:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP1503]], i64 [[TMP1504]] acquire acquire, align 8
3799 // CHECK-NEXT:    [[TMP1506:%.*]] = load i64, i64* [[ULLE]], align 8
3800 // CHECK-NEXT:    [[TMP1507:%.*]] = load i64, i64* [[ULLD]], align 8
3801 // CHECK-NEXT:    [[TMP1508:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP1506]], i64 [[TMP1507]] acquire acquire, align 8
3802 // CHECK-NEXT:    [[TMP1509:%.*]] = load i64, i64* [[ULLE]], align 8
3803 // CHECK-NEXT:    [[TMP1510:%.*]] = load i64, i64* [[ULLD]], align 8
3804 // CHECK-NEXT:    [[TMP1511:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP1509]], i64 [[TMP1510]] acquire acquire, align 8
3805 // CHECK-NEXT:    [[TMP1512:%.*]] = load i64, i64* [[LLE]], align 8
3806 // CHECK-NEXT:    [[TMP1513:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP1512]] monotonic, align 8
3807 // CHECK-NEXT:    [[TMP1514:%.*]] = load i64, i64* [[LLE]], align 8
3808 // CHECK-NEXT:    [[TMP1515:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP1514]] monotonic, align 8
3809 // CHECK-NEXT:    [[TMP1516:%.*]] = load i64, i64* [[LLE]], align 8
3810 // CHECK-NEXT:    [[TMP1517:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP1516]] monotonic, align 8
3811 // CHECK-NEXT:    [[TMP1518:%.*]] = load i64, i64* [[LLE]], align 8
3812 // CHECK-NEXT:    [[TMP1519:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP1518]] monotonic, align 8
3813 // CHECK-NEXT:    [[TMP1520:%.*]] = load i64, i64* [[LLE]], align 8
3814 // CHECK-NEXT:    [[TMP1521:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP1520]] monotonic, align 8
3815 // CHECK-NEXT:    [[TMP1522:%.*]] = load i64, i64* [[LLE]], align 8
3816 // CHECK-NEXT:    [[TMP1523:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP1522]] monotonic, align 8
3817 // CHECK-NEXT:    [[TMP1524:%.*]] = load i64, i64* [[LLE]], align 8
3818 // CHECK-NEXT:    [[TMP1525:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP1524]] monotonic, align 8
3819 // CHECK-NEXT:    [[TMP1526:%.*]] = load i64, i64* [[LLE]], align 8
3820 // CHECK-NEXT:    [[TMP1527:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP1526]] monotonic, align 8
3821 // CHECK-NEXT:    [[TMP1528:%.*]] = load i64, i64* [[LLE]], align 8
3822 // CHECK-NEXT:    [[TMP1529:%.*]] = load i64, i64* [[LLD]], align 8
3823 // CHECK-NEXT:    [[TMP1530:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP1528]], i64 [[TMP1529]] monotonic monotonic, align 8
3824 // CHECK-NEXT:    [[TMP1531:%.*]] = load i64, i64* [[LLE]], align 8
3825 // CHECK-NEXT:    [[TMP1532:%.*]] = load i64, i64* [[LLD]], align 8
3826 // CHECK-NEXT:    [[TMP1533:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP1531]], i64 [[TMP1532]] monotonic monotonic, align 8
3827 // CHECK-NEXT:    [[TMP1534:%.*]] = load i64, i64* [[LLE]], align 8
3828 // CHECK-NEXT:    [[TMP1535:%.*]] = load i64, i64* [[LLD]], align 8
3829 // CHECK-NEXT:    [[TMP1536:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP1534]], i64 [[TMP1535]] monotonic monotonic, align 8
3830 // CHECK-NEXT:    [[TMP1537:%.*]] = load i64, i64* [[LLE]], align 8
3831 // CHECK-NEXT:    [[TMP1538:%.*]] = load i64, i64* [[LLD]], align 8
3832 // CHECK-NEXT:    [[TMP1539:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP1537]], i64 [[TMP1538]] monotonic monotonic, align 8
3833 // CHECK-NEXT:    [[TMP1540:%.*]] = load i64, i64* [[ULLE]], align 8
3834 // CHECK-NEXT:    [[TMP1541:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP1540]] monotonic, align 8
3835 // CHECK-NEXT:    [[TMP1542:%.*]] = load i64, i64* [[ULLE]], align 8
3836 // CHECK-NEXT:    [[TMP1543:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP1542]] monotonic, align 8
3837 // CHECK-NEXT:    [[TMP1544:%.*]] = load i64, i64* [[ULLE]], align 8
3838 // CHECK-NEXT:    [[TMP1545:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP1544]] monotonic, align 8
3839 // CHECK-NEXT:    [[TMP1546:%.*]] = load i64, i64* [[ULLE]], align 8
3840 // CHECK-NEXT:    [[TMP1547:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP1546]] monotonic, align 8
3841 // CHECK-NEXT:    [[TMP1548:%.*]] = load i64, i64* [[ULLE]], align 8
3842 // CHECK-NEXT:    [[TMP1549:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP1548]] monotonic, align 8
3843 // CHECK-NEXT:    [[TMP1550:%.*]] = load i64, i64* [[ULLE]], align 8
3844 // CHECK-NEXT:    [[TMP1551:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP1550]] monotonic, align 8
3845 // CHECK-NEXT:    [[TMP1552:%.*]] = load i64, i64* [[ULLE]], align 8
3846 // CHECK-NEXT:    [[TMP1553:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP1552]] monotonic, align 8
3847 // CHECK-NEXT:    [[TMP1554:%.*]] = load i64, i64* [[ULLE]], align 8
3848 // CHECK-NEXT:    [[TMP1555:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP1554]] monotonic, align 8
3849 // CHECK-NEXT:    [[TMP1556:%.*]] = load i64, i64* [[ULLE]], align 8
3850 // CHECK-NEXT:    [[TMP1557:%.*]] = load i64, i64* [[ULLD]], align 8
3851 // CHECK-NEXT:    [[TMP1558:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP1556]], i64 [[TMP1557]] monotonic monotonic, align 8
3852 // CHECK-NEXT:    [[TMP1559:%.*]] = load i64, i64* [[ULLE]], align 8
3853 // CHECK-NEXT:    [[TMP1560:%.*]] = load i64, i64* [[ULLD]], align 8
3854 // CHECK-NEXT:    [[TMP1561:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP1559]], i64 [[TMP1560]] monotonic monotonic, align 8
3855 // CHECK-NEXT:    [[TMP1562:%.*]] = load i64, i64* [[ULLE]], align 8
3856 // CHECK-NEXT:    [[TMP1563:%.*]] = load i64, i64* [[ULLD]], align 8
3857 // CHECK-NEXT:    [[TMP1564:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP1562]], i64 [[TMP1563]] monotonic monotonic, align 8
3858 // CHECK-NEXT:    [[TMP1565:%.*]] = load i64, i64* [[ULLE]], align 8
3859 // CHECK-NEXT:    [[TMP1566:%.*]] = load i64, i64* [[ULLD]], align 8
3860 // CHECK-NEXT:    [[TMP1567:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP1565]], i64 [[TMP1566]] monotonic monotonic, align 8
3861 // CHECK-NEXT:    [[TMP1568:%.*]] = load i64, i64* [[LLE]], align 8
3862 // CHECK-NEXT:    [[TMP1569:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP1568]] release, align 8
3863 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3864 // CHECK-NEXT:    [[TMP1570:%.*]] = load i64, i64* [[LLE]], align 8
3865 // CHECK-NEXT:    [[TMP1571:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP1570]] release, align 8
3866 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3867 // CHECK-NEXT:    [[TMP1572:%.*]] = load i64, i64* [[LLE]], align 8
3868 // CHECK-NEXT:    [[TMP1573:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP1572]] release, align 8
3869 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3870 // CHECK-NEXT:    [[TMP1574:%.*]] = load i64, i64* [[LLE]], align 8
3871 // CHECK-NEXT:    [[TMP1575:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP1574]] release, align 8
3872 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3873 // CHECK-NEXT:    [[TMP1576:%.*]] = load i64, i64* [[LLE]], align 8
3874 // CHECK-NEXT:    [[TMP1577:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP1576]] release, align 8
3875 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3876 // CHECK-NEXT:    [[TMP1578:%.*]] = load i64, i64* [[LLE]], align 8
3877 // CHECK-NEXT:    [[TMP1579:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP1578]] release, align 8
3878 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3879 // CHECK-NEXT:    [[TMP1580:%.*]] = load i64, i64* [[LLE]], align 8
3880 // CHECK-NEXT:    [[TMP1581:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP1580]] release, align 8
3881 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3882 // CHECK-NEXT:    [[TMP1582:%.*]] = load i64, i64* [[LLE]], align 8
3883 // CHECK-NEXT:    [[TMP1583:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP1582]] release, align 8
3884 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3885 // CHECK-NEXT:    [[TMP1584:%.*]] = load i64, i64* [[LLE]], align 8
3886 // CHECK-NEXT:    [[TMP1585:%.*]] = load i64, i64* [[LLD]], align 8
3887 // CHECK-NEXT:    [[TMP1586:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP1584]], i64 [[TMP1585]] release monotonic, align 8
3888 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3889 // CHECK-NEXT:    [[TMP1587:%.*]] = load i64, i64* [[LLE]], align 8
3890 // CHECK-NEXT:    [[TMP1588:%.*]] = load i64, i64* [[LLD]], align 8
3891 // CHECK-NEXT:    [[TMP1589:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP1587]], i64 [[TMP1588]] release monotonic, align 8
3892 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3893 // CHECK-NEXT:    [[TMP1590:%.*]] = load i64, i64* [[LLE]], align 8
3894 // CHECK-NEXT:    [[TMP1591:%.*]] = load i64, i64* [[LLD]], align 8
3895 // CHECK-NEXT:    [[TMP1592:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP1590]], i64 [[TMP1591]] release monotonic, align 8
3896 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3897 // CHECK-NEXT:    [[TMP1593:%.*]] = load i64, i64* [[LLE]], align 8
3898 // CHECK-NEXT:    [[TMP1594:%.*]] = load i64, i64* [[LLD]], align 8
3899 // CHECK-NEXT:    [[TMP1595:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP1593]], i64 [[TMP1594]] release monotonic, align 8
3900 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3901 // CHECK-NEXT:    [[TMP1596:%.*]] = load i64, i64* [[ULLE]], align 8
3902 // CHECK-NEXT:    [[TMP1597:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP1596]] release, align 8
3903 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3904 // CHECK-NEXT:    [[TMP1598:%.*]] = load i64, i64* [[ULLE]], align 8
3905 // CHECK-NEXT:    [[TMP1599:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP1598]] release, align 8
3906 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3907 // CHECK-NEXT:    [[TMP1600:%.*]] = load i64, i64* [[ULLE]], align 8
3908 // CHECK-NEXT:    [[TMP1601:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP1600]] release, align 8
3909 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3910 // CHECK-NEXT:    [[TMP1602:%.*]] = load i64, i64* [[ULLE]], align 8
3911 // CHECK-NEXT:    [[TMP1603:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP1602]] release, align 8
3912 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3913 // CHECK-NEXT:    [[TMP1604:%.*]] = load i64, i64* [[ULLE]], align 8
3914 // CHECK-NEXT:    [[TMP1605:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP1604]] release, align 8
3915 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3916 // CHECK-NEXT:    [[TMP1606:%.*]] = load i64, i64* [[ULLE]], align 8
3917 // CHECK-NEXT:    [[TMP1607:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP1606]] release, align 8
3918 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3919 // CHECK-NEXT:    [[TMP1608:%.*]] = load i64, i64* [[ULLE]], align 8
3920 // CHECK-NEXT:    [[TMP1609:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP1608]] release, align 8
3921 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3922 // CHECK-NEXT:    [[TMP1610:%.*]] = load i64, i64* [[ULLE]], align 8
3923 // CHECK-NEXT:    [[TMP1611:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP1610]] release, align 8
3924 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3925 // CHECK-NEXT:    [[TMP1612:%.*]] = load i64, i64* [[ULLE]], align 8
3926 // CHECK-NEXT:    [[TMP1613:%.*]] = load i64, i64* [[ULLD]], align 8
3927 // CHECK-NEXT:    [[TMP1614:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP1612]], i64 [[TMP1613]] release monotonic, align 8
3928 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3929 // CHECK-NEXT:    [[TMP1615:%.*]] = load i64, i64* [[ULLE]], align 8
3930 // CHECK-NEXT:    [[TMP1616:%.*]] = load i64, i64* [[ULLD]], align 8
3931 // CHECK-NEXT:    [[TMP1617:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP1615]], i64 [[TMP1616]] release monotonic, align 8
3932 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3933 // CHECK-NEXT:    [[TMP1618:%.*]] = load i64, i64* [[ULLE]], align 8
3934 // CHECK-NEXT:    [[TMP1619:%.*]] = load i64, i64* [[ULLD]], align 8
3935 // CHECK-NEXT:    [[TMP1620:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP1618]], i64 [[TMP1619]] release monotonic, align 8
3936 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3937 // CHECK-NEXT:    [[TMP1621:%.*]] = load i64, i64* [[ULLE]], align 8
3938 // CHECK-NEXT:    [[TMP1622:%.*]] = load i64, i64* [[ULLD]], align 8
3939 // CHECK-NEXT:    [[TMP1623:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP1621]], i64 [[TMP1622]] release monotonic, align 8
3940 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3941 // CHECK-NEXT:    [[TMP1624:%.*]] = load i64, i64* [[LLE]], align 8
3942 // CHECK-NEXT:    [[TMP1625:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP1624]] seq_cst, align 8
3943 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3944 // CHECK-NEXT:    [[TMP1626:%.*]] = load i64, i64* [[LLE]], align 8
3945 // CHECK-NEXT:    [[TMP1627:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP1626]] seq_cst, align 8
3946 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3947 // CHECK-NEXT:    [[TMP1628:%.*]] = load i64, i64* [[LLE]], align 8
3948 // CHECK-NEXT:    [[TMP1629:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP1628]] seq_cst, align 8
3949 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3950 // CHECK-NEXT:    [[TMP1630:%.*]] = load i64, i64* [[LLE]], align 8
3951 // CHECK-NEXT:    [[TMP1631:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP1630]] seq_cst, align 8
3952 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3953 // CHECK-NEXT:    [[TMP1632:%.*]] = load i64, i64* [[LLE]], align 8
3954 // CHECK-NEXT:    [[TMP1633:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP1632]] seq_cst, align 8
3955 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3956 // CHECK-NEXT:    [[TMP1634:%.*]] = load i64, i64* [[LLE]], align 8
3957 // CHECK-NEXT:    [[TMP1635:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP1634]] seq_cst, align 8
3958 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3959 // CHECK-NEXT:    [[TMP1636:%.*]] = load i64, i64* [[LLE]], align 8
3960 // CHECK-NEXT:    [[TMP1637:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP1636]] seq_cst, align 8
3961 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3962 // CHECK-NEXT:    [[TMP1638:%.*]] = load i64, i64* [[LLE]], align 8
3963 // CHECK-NEXT:    [[TMP1639:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP1638]] seq_cst, align 8
3964 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3965 // CHECK-NEXT:    [[TMP1640:%.*]] = load i64, i64* [[LLE]], align 8
3966 // CHECK-NEXT:    [[TMP1641:%.*]] = load i64, i64* [[LLD]], align 8
3967 // CHECK-NEXT:    [[TMP1642:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP1640]], i64 [[TMP1641]] seq_cst seq_cst, align 8
3968 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3969 // CHECK-NEXT:    [[TMP1643:%.*]] = load i64, i64* [[LLE]], align 8
3970 // CHECK-NEXT:    [[TMP1644:%.*]] = load i64, i64* [[LLD]], align 8
3971 // CHECK-NEXT:    [[TMP1645:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP1643]], i64 [[TMP1644]] seq_cst seq_cst, align 8
3972 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3973 // CHECK-NEXT:    [[TMP1646:%.*]] = load i64, i64* [[LLE]], align 8
3974 // CHECK-NEXT:    [[TMP1647:%.*]] = load i64, i64* [[LLD]], align 8
3975 // CHECK-NEXT:    [[TMP1648:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP1646]], i64 [[TMP1647]] seq_cst seq_cst, align 8
3976 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3977 // CHECK-NEXT:    [[TMP1649:%.*]] = load i64, i64* [[LLE]], align 8
3978 // CHECK-NEXT:    [[TMP1650:%.*]] = load i64, i64* [[LLD]], align 8
3979 // CHECK-NEXT:    [[TMP1651:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP1649]], i64 [[TMP1650]] seq_cst seq_cst, align 8
3980 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3981 // CHECK-NEXT:    [[TMP1652:%.*]] = load i64, i64* [[ULLE]], align 8
3982 // CHECK-NEXT:    [[TMP1653:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP1652]] seq_cst, align 8
3983 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3984 // CHECK-NEXT:    [[TMP1654:%.*]] = load i64, i64* [[ULLE]], align 8
3985 // CHECK-NEXT:    [[TMP1655:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP1654]] seq_cst, align 8
3986 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3987 // CHECK-NEXT:    [[TMP1656:%.*]] = load i64, i64* [[ULLE]], align 8
3988 // CHECK-NEXT:    [[TMP1657:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP1656]] seq_cst, align 8
3989 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3990 // CHECK-NEXT:    [[TMP1658:%.*]] = load i64, i64* [[ULLE]], align 8
3991 // CHECK-NEXT:    [[TMP1659:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP1658]] seq_cst, align 8
3992 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3993 // CHECK-NEXT:    [[TMP1660:%.*]] = load i64, i64* [[ULLE]], align 8
3994 // CHECK-NEXT:    [[TMP1661:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP1660]] seq_cst, align 8
3995 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3996 // CHECK-NEXT:    [[TMP1662:%.*]] = load i64, i64* [[ULLE]], align 8
3997 // CHECK-NEXT:    [[TMP1663:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP1662]] seq_cst, align 8
3998 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
3999 // CHECK-NEXT:    [[TMP1664:%.*]] = load i64, i64* [[ULLE]], align 8
4000 // CHECK-NEXT:    [[TMP1665:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP1664]] seq_cst, align 8
4001 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
4002 // CHECK-NEXT:    [[TMP1666:%.*]] = load i64, i64* [[ULLE]], align 8
4003 // CHECK-NEXT:    [[TMP1667:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP1666]] seq_cst, align 8
4004 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
4005 // CHECK-NEXT:    [[TMP1668:%.*]] = load i64, i64* [[ULLE]], align 8
4006 // CHECK-NEXT:    [[TMP1669:%.*]] = load i64, i64* [[ULLD]], align 8
4007 // CHECK-NEXT:    [[TMP1670:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP1668]], i64 [[TMP1669]] seq_cst seq_cst, align 8
4008 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
4009 // CHECK-NEXT:    [[TMP1671:%.*]] = load i64, i64* [[ULLE]], align 8
4010 // CHECK-NEXT:    [[TMP1672:%.*]] = load i64, i64* [[ULLD]], align 8
4011 // CHECK-NEXT:    [[TMP1673:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP1671]], i64 [[TMP1672]] seq_cst seq_cst, align 8
4012 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
4013 // CHECK-NEXT:    [[TMP1674:%.*]] = load i64, i64* [[ULLE]], align 8
4014 // CHECK-NEXT:    [[TMP1675:%.*]] = load i64, i64* [[ULLD]], align 8
4015 // CHECK-NEXT:    [[TMP1676:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP1674]], i64 [[TMP1675]] seq_cst seq_cst, align 8
4016 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
4017 // CHECK-NEXT:    [[TMP1677:%.*]] = load i64, i64* [[ULLE]], align 8
4018 // CHECK-NEXT:    [[TMP1678:%.*]] = load i64, i64* [[ULLD]], align 8
4019 // CHECK-NEXT:    [[TMP1679:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP1677]], i64 [[TMP1678]] seq_cst seq_cst, align 8
4020 // CHECK-NEXT:    call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]])
4021 // CHECK-NEXT:    ret void
4022