1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
2 // expected-no-diagnostics
3 #ifndef HEADER
4 #define HEADER
5 // Test host codegen.
6 // RUN: %clang_cc1 -no-opaque-pointers -DCK1 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1
7 // RUN: %clang_cc1 -no-opaque-pointers -DCK1 -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
8 // RUN: %clang_cc1 -no-opaque-pointers -DCK1 -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK1
9 // RUN: %clang_cc1 -no-opaque-pointers -DCK1 -verify -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK3
10 // RUN: %clang_cc1 -no-opaque-pointers -DCK1 -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
11 // RUN: %clang_cc1 -no-opaque-pointers -DCK1 -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK3
12 
13 // RUN: %clang_cc1 -no-opaque-pointers -DCK1 -verify -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
14 // RUN: %clang_cc1 -no-opaque-pointers -DCK1 -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
15 // RUN: %clang_cc1 -no-opaque-pointers -DCK1 -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
16 // RUN: %clang_cc1 -no-opaque-pointers -DCK1 -verify -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
17 // RUN: %clang_cc1 -no-opaque-pointers -DCK1 -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
18 // RUN: %clang_cc1 -no-opaque-pointers -DCK1 -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
19 #ifdef CK1
20 
int Gbla;         // global int used in num_teams/thread_limit expressions below
long long Gblb;   // 64-bit global used in a thread_limit expression below
int &Gblc = Gbla; // global reference; exercises reference capture in teams clauses
24 
int teams_argument_global_local(int a){ // exercises #pragma omp teams with global/local clause arguments
  int comp = 1; // bumped inside every teams region; returned at the end

  int la = 23;     // local int used as num_teams / thread_limit argument
  float lc = 25.0; // local float, cast to long long in a clause expression below

  #pragma omp target
  #pragma omp teams
  {
    ++comp; // teams with no clauses
  }

  #pragma omp target
  {{{
    #pragma omp teams
    {
      ++comp; // teams nested in extra compound statements under target
    }
  }}}


  #pragma omp target
  #pragma omp teams num_teams(la)
  {
    ++comp; // num_teams from a local variable
  }


  #pragma omp target
  #pragma omp teams thread_limit(la)
  {
    ++comp; // thread_limit from a local variable
  }




  #pragma omp target
  #pragma omp teams num_teams(Gbla+a) thread_limit(Gblb+(long long)lc)
  {
    ++comp; // clause arguments mix globals, the parameter, and a cast local
  }




  #pragma omp target
  #pragma omp teams num_teams(Gblc+1) thread_limit(Gblc+2)
  {
    comp += Gblc; // clause arguments read through a global reference
  }

  return comp;
}
79 
80 #endif // CK1
81 
82 // Test host codegen.
83 // RUN: %clang_cc1 -no-opaque-pointers -DCK2 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK9
84 // RUN: %clang_cc1 -no-opaque-pointers -DCK2 -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
85 // RUN: %clang_cc1 -no-opaque-pointers -DCK2 -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK9
86 // RUN: %clang_cc1 -no-opaque-pointers -DCK2 -verify -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK11
87 // RUN: %clang_cc1 -no-opaque-pointers -DCK2 -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
88 // RUN: %clang_cc1 -no-opaque-pointers -DCK2 -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK11
89 
90 // RUN: %clang_cc1 -no-opaque-pointers -DCK2 -verify -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
91 // RUN: %clang_cc1 -no-opaque-pointers -DCK2 -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
92 // RUN: %clang_cc1 -no-opaque-pointers -DCK2 -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
93 // RUN: %clang_cc1 -no-opaque-pointers -DCK2 -verify -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
94 // RUN: %clang_cc1 -no-opaque-pointers -DCK2 -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
95 // RUN: %clang_cc1 -no-opaque-pointers -DCK2 -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
96 #ifdef CK2
97 
template <typename T>
struct SS{  // simple template aggregate whose members feed teams clauses
  T a;      // template-typed member used as a clause argument
  float b;  // float member, cast to long long in the clauses below
};

SS<int> Gbla;       // global instance with int payload
SS<long long> Gblb; // global instance with long long payload
106 
int teams_template_arg(void) { // teams clauses taking members of template-class globals/locals
  int comp = 1; // bumped in each teams region; returned

  SS<int> la;       // local with int member 'a' and float member 'b'
  SS<long long> lb; // local with long long member 'a' and float member 'b'




  #pragma omp target
  #pragma omp teams num_teams(Gbla.a) thread_limit((long long)la.b)
  {
    ++comp; // global member vs. cast local float member as clause args
  }




  #pragma omp target
  #pragma omp teams num_teams((long long)lb.b) thread_limit(Gblb.a)
  {
    ++comp; // roles swapped: cast local member vs. global long long member
  }
  return comp;
}
132 #endif // CK2
133 
134 // Test host codegen.
135 // RUN: %clang_cc1 -no-opaque-pointers -DCK3 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK17
136 // RUN: %clang_cc1 -no-opaque-pointers -DCK3 -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
137 // RUN: %clang_cc1 -no-opaque-pointers -DCK3 -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK17
138 // RUN: %clang_cc1 -no-opaque-pointers -DCK3 -verify -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK19
139 // RUN: %clang_cc1 -no-opaque-pointers -DCK3 -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
140 // RUN: %clang_cc1 -no-opaque-pointers -DCK3 -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK19
141 
142 // RUN: %clang_cc1 -no-opaque-pointers -DCK3 -verify -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
143 // RUN: %clang_cc1 -no-opaque-pointers -DCK3 -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
144 // RUN: %clang_cc1 -no-opaque-pointers -DCK3 -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
145 // RUN: %clang_cc1 -no-opaque-pointers -DCK3 -verify -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
146 // RUN: %clang_cc1 -no-opaque-pointers -DCK3 -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
147 // RUN: %clang_cc1 -no-opaque-pointers -DCK3 -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
148 #ifdef CK3
149 
150 
template <typename T, int X, long long Y>
struct SS{  // template whose non-type parameters appear directly in teams clauses
  T a;      // member used as a num_teams argument
  float b;  // member cast to int in a thread_limit expression

  int foo(void) { // runs two target+teams regions parameterized by T, X, Y
    int comp = 1; // bumped per region; returned



    #pragma omp target
    #pragma omp teams num_teams(a) thread_limit(X)
    {
      ++comp; // member vs. non-type template parameter as clause args
    }



    #pragma omp target
    #pragma omp teams num_teams(Y) thread_limit((int)b+X)
    {
      ++comp; // long long template constant and a mixed cast expression
    }
    return comp;
  }
};
177 
int teams_template_struct(void) { // instantiates SS<int, 123, 456> and runs its teams regions
  SS<int, 123, 456> V;
  return V.foo();

}
183 #endif // CK3
184 
185 // Test target codegen - host bc file has to be created first.
186 // RUN: %clang_cc1 -no-opaque-pointers -DCK4 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc
187 // RUN: %clang_cc1 -no-opaque-pointers -DCK4 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix=CHECK25
188 // RUN: %clang_cc1 -no-opaque-pointers -DCK4 -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s
189 // RUN: %clang_cc1 -no-opaque-pointers -DCK4 -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK25
190 // RUN: %clang_cc1 -no-opaque-pointers -DCK4 -verify -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc
191 // RUN: %clang_cc1 -no-opaque-pointers -DCK4 -verify -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix=CHECK27
192 // RUN: %clang_cc1 -no-opaque-pointers -DCK4 -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s
193 // RUN: %clang_cc1 -no-opaque-pointers -DCK4 -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK27
194 
195 // RUN: %clang_cc1 -no-opaque-pointers -DCK4 -verify -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc
196 // RUN: %clang_cc1 -no-opaque-pointers -DCK4 -verify -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
197 // RUN: %clang_cc1 -no-opaque-pointers -DCK4 -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s
198 // RUN: %clang_cc1 -no-opaque-pointers -DCK4 -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
199 // RUN: %clang_cc1 -no-opaque-pointers -DCK4 -verify -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc
200 // RUN: %clang_cc1 -no-opaque-pointers -DCK4 -verify -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
201 // RUN: %clang_cc1 -no-opaque-pointers -DCK4 -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s
202 // RUN: %clang_cc1 -no-opaque-pointers -DCK4 -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
203 
204 #ifdef CK4
205 
206 
template <typename T>
int tmain(T argc) { // device-codegen case: teams with no clauses, template version
#pragma omp target
#pragma omp teams
  argc = 0; // teams region is a single statement (no braces)
  return 0;
}
214 
int main (int argc, char **argv) { // non-template twin of tmain above
#pragma omp target
#pragma omp teams
  argc = 0; // single-statement teams region
  return tmain(argv); // instantiates tmain with T = char **
}
221 
222 
223 
224 
225 #endif // CK4
226 
227 // Test target codegen - host bc file has to be created first.
228 // RUN: %clang_cc1 -no-opaque-pointers -DCK5 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc
229 // RUN: %clang_cc1 -no-opaque-pointers -DCK5 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix=CHECK33
230 // RUN: %clang_cc1 -no-opaque-pointers -DCK5 -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s
231 // RUN: %clang_cc1 -no-opaque-pointers -DCK5 -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK33
232 // RUN: %clang_cc1 -no-opaque-pointers -DCK5 -verify -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc
233 // RUN: %clang_cc1 -no-opaque-pointers -DCK5 -verify -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix=CHECK35
234 // RUN: %clang_cc1 -no-opaque-pointers -DCK5 -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s
235 // RUN: %clang_cc1 -no-opaque-pointers -DCK5 -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK35
236 
237 // RUN: %clang_cc1 -no-opaque-pointers -DCK5 -verify -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc
238 // RUN: %clang_cc1 -no-opaque-pointers -DCK5 -verify -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
239 // RUN: %clang_cc1 -no-opaque-pointers -DCK5 -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s
240 // RUN: %clang_cc1 -no-opaque-pointers -DCK5 -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
241 // RUN: %clang_cc1 -no-opaque-pointers -DCK5 -verify -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc
242 // RUN: %clang_cc1 -no-opaque-pointers -DCK5 -verify -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
243 // RUN: %clang_cc1 -no-opaque-pointers -DCK5 -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s
244 // RUN: %clang_cc1 -no-opaque-pointers -DCK5 -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
245 
246 // expected-no-diagnostics
247 #ifdef CK5
248 
249 
template <typename T>
int tmain(T argc) { // device-codegen case: num_teams/thread_limit from locals, template version
  int a = 10; // num_teams value
  int b = 5;  // thread_limit value
#pragma omp target
#pragma omp teams num_teams(a) thread_limit(b)
  {
  argc = 0; // body only clears the (mapped) argument
  }
  return 0;
}
261 
int main (int argc, char **argv) { // same shape as tmain but with a = 20
  int a = 20; // num_teams value
  int b = 5;  // thread_limit value
#pragma omp target
#pragma omp teams num_teams(a) thread_limit(b)
  {
  argc = 0;
  }
  return tmain(argv); // instantiates tmain with T = char **
}
272 
273 
274 
275 #endif // CK5
276 
277 // Test host codegen.
278 // RUN: %clang_cc1 -no-opaque-pointers -DCK6 -verify -fopenmp -fopenmp-version=50 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK41
279 // RUN: %clang_cc1 -no-opaque-pointers -DCK6 -fopenmp -fopenmp-version=50 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
280 // RUN: %clang_cc1 -no-opaque-pointers -DCK6 -fopenmp-version=50 -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK41
281 // RUN: %clang_cc1 -no-opaque-pointers -DCK6 -verify -fopenmp -fopenmp-version=50 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK43
282 // RUN: %clang_cc1 -no-opaque-pointers -DCK6 -fopenmp -fopenmp-version=50 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
283 // RUN: %clang_cc1 -no-opaque-pointers -DCK6 -fopenmp -fopenmp-version=50 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK43
284 
285 // RUN: %clang_cc1 -no-opaque-pointers -DCK6 -verify -fopenmp-version=50 -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
286 // RUN: %clang_cc1 -no-opaque-pointers -DCK6 -fopenmp-simd -fopenmp-version=50 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
287 // RUN: %clang_cc1 -no-opaque-pointers -DCK6 -fopenmp-simd -fopenmp-version=50 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
288 // RUN: %clang_cc1 -no-opaque-pointers -DCK6 -verify -fopenmp-simd -fopenmp-version=50 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
289 // RUN: %clang_cc1 -no-opaque-pointers -DCK6 -fopenmp-simd -fopenmp-version=50 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
290 // RUN: %clang_cc1 -no-opaque-pointers -DCK6 -fopenmp-simd -fopenmp-version=50 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
291 #ifdef CK6
292 
void foo() { // teams not enclosed in a target region; CK6 RUN lines pass -fopenmp-version=50
#pragma omp teams
  ; // empty associated statement
}
297 
298 #endif // CK6
299 
300 #endif
301 // CHECK1-LABEL: define {{[^@]+}}@_Z27teams_argument_global_locali
302 // CHECK1-SAME: (i32 noundef signext [[A:%.*]]) #[[ATTR0:[0-9]+]] {
303 // CHECK1-NEXT:  entry:
304 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
305 // CHECK1-NEXT:    [[COMP:%.*]] = alloca i32, align 4
306 // CHECK1-NEXT:    [[LA:%.*]] = alloca i32, align 4
307 // CHECK1-NEXT:    [[LC:%.*]] = alloca float, align 4
308 // CHECK1-NEXT:    [[COMP_CASTED:%.*]] = alloca i64, align 8
309 // CHECK1-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
310 // CHECK1-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
311 // CHECK1-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
312 // CHECK1-NEXT:    [[COMP_CASTED1:%.*]] = alloca i64, align 8
313 // CHECK1-NEXT:    [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [1 x i8*], align 8
314 // CHECK1-NEXT:    [[DOTOFFLOAD_PTRS4:%.*]] = alloca [1 x i8*], align 8
315 // CHECK1-NEXT:    [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [1 x i8*], align 8
316 // CHECK1-NEXT:    [[LA_CASTED:%.*]] = alloca i64, align 8
317 // CHECK1-NEXT:    [[COMP_CASTED10:%.*]] = alloca i64, align 8
318 // CHECK1-NEXT:    [[DOTOFFLOAD_BASEPTRS12:%.*]] = alloca [2 x i8*], align 8
319 // CHECK1-NEXT:    [[DOTOFFLOAD_PTRS13:%.*]] = alloca [2 x i8*], align 8
320 // CHECK1-NEXT:    [[DOTOFFLOAD_MAPPERS14:%.*]] = alloca [2 x i8*], align 8
321 // CHECK1-NEXT:    [[LA_CASTED18:%.*]] = alloca i64, align 8
322 // CHECK1-NEXT:    [[COMP_CASTED20:%.*]] = alloca i64, align 8
323 // CHECK1-NEXT:    [[DOTOFFLOAD_BASEPTRS22:%.*]] = alloca [2 x i8*], align 8
324 // CHECK1-NEXT:    [[DOTOFFLOAD_PTRS23:%.*]] = alloca [2 x i8*], align 8
325 // CHECK1-NEXT:    [[DOTOFFLOAD_MAPPERS24:%.*]] = alloca [2 x i8*], align 8
326 // CHECK1-NEXT:    [[GBLA_CASTED:%.*]] = alloca i64, align 8
327 // CHECK1-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
328 // CHECK1-NEXT:    [[GBLB_CASTED:%.*]] = alloca i64, align 8
329 // CHECK1-NEXT:    [[LC_CASTED:%.*]] = alloca i64, align 8
330 // CHECK1-NEXT:    [[COMP_CASTED31:%.*]] = alloca i64, align 8
331 // CHECK1-NEXT:    [[DOTOFFLOAD_BASEPTRS33:%.*]] = alloca [5 x i8*], align 8
332 // CHECK1-NEXT:    [[DOTOFFLOAD_PTRS34:%.*]] = alloca [5 x i8*], align 8
333 // CHECK1-NEXT:    [[DOTOFFLOAD_MAPPERS35:%.*]] = alloca [5 x i8*], align 8
334 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32*, align 8
335 // CHECK1-NEXT:    [[GBLC_CASTED:%.*]] = alloca i64, align 8
336 // CHECK1-NEXT:    [[COMP_CASTED42:%.*]] = alloca i64, align 8
337 // CHECK1-NEXT:    [[DOTOFFLOAD_BASEPTRS44:%.*]] = alloca [2 x i8*], align 8
338 // CHECK1-NEXT:    [[DOTOFFLOAD_PTRS45:%.*]] = alloca [2 x i8*], align 8
339 // CHECK1-NEXT:    [[DOTOFFLOAD_MAPPERS46:%.*]] = alloca [2 x i8*], align 8
340 // CHECK1-NEXT:    [[_TMP47:%.*]] = alloca i32*, align 8
341 // CHECK1-NEXT:    [[_TMP49:%.*]] = alloca i32*, align 8
342 // CHECK1-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
343 // CHECK1-NEXT:    store i32 1, i32* [[COMP]], align 4
344 // CHECK1-NEXT:    store i32 23, i32* [[LA]], align 4
345 // CHECK1-NEXT:    store float 2.500000e+01, float* [[LC]], align 4
346 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32, i32* [[COMP]], align 4
347 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[COMP_CASTED]] to i32*
348 // CHECK1-NEXT:    store i32 [[TMP0]], i32* [[CONV]], align 4
349 // CHECK1-NEXT:    [[TMP1:%.*]] = load i64, i64* [[COMP_CASTED]], align 8
350 // CHECK1-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
351 // CHECK1-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i64*
352 // CHECK1-NEXT:    store i64 [[TMP1]], i64* [[TMP3]], align 8
353 // CHECK1-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
354 // CHECK1-NEXT:    [[TMP5:%.*]] = bitcast i8** [[TMP4]] to i64*
355 // CHECK1-NEXT:    store i64 [[TMP1]], i64* [[TMP5]], align 8
356 // CHECK1-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
357 // CHECK1-NEXT:    store i8* null, i8** [[TMP6]], align 8
358 // CHECK1-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
359 // CHECK1-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
360 // CHECK1-NEXT:    [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
361 // CHECK1-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
362 // CHECK1-NEXT:    store i32 1, i32* [[TMP9]], align 4
363 // CHECK1-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
364 // CHECK1-NEXT:    store i32 1, i32* [[TMP10]], align 4
365 // CHECK1-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
366 // CHECK1-NEXT:    store i8** [[TMP7]], i8*** [[TMP11]], align 8
367 // CHECK1-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
368 // CHECK1-NEXT:    store i8** [[TMP8]], i8*** [[TMP12]], align 8
369 // CHECK1-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
370 // CHECK1-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64** [[TMP13]], align 8
371 // CHECK1-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
372 // CHECK1-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i64** [[TMP14]], align 8
373 // CHECK1-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
374 // CHECK1-NEXT:    store i8** null, i8*** [[TMP15]], align 8
375 // CHECK1-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
376 // CHECK1-NEXT:    store i8** null, i8*** [[TMP16]], align 8
377 // CHECK1-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
378 // CHECK1-NEXT:    store i64 0, i64* [[TMP17]], align 8
379 // CHECK1-NEXT:    [[TMP18:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z27teams_argument_global_locali_l31.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
380 // CHECK1-NEXT:    [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
381 // CHECK1-NEXT:    br i1 [[TMP19]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
382 // CHECK1:       omp_offload.failed:
383 // CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z27teams_argument_global_locali_l31(i64 [[TMP1]]) #[[ATTR2:[0-9]+]]
384 // CHECK1-NEXT:    br label [[OMP_OFFLOAD_CONT]]
385 // CHECK1:       omp_offload.cont:
386 // CHECK1-NEXT:    [[TMP20:%.*]] = load i32, i32* [[COMP]], align 4
387 // CHECK1-NEXT:    [[CONV2:%.*]] = bitcast i64* [[COMP_CASTED1]] to i32*
388 // CHECK1-NEXT:    store i32 [[TMP20]], i32* [[CONV2]], align 4
389 // CHECK1-NEXT:    [[TMP21:%.*]] = load i64, i64* [[COMP_CASTED1]], align 8
390 // CHECK1-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
391 // CHECK1-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64*
392 // CHECK1-NEXT:    store i64 [[TMP21]], i64* [[TMP23]], align 8
393 // CHECK1-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
394 // CHECK1-NEXT:    [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64*
395 // CHECK1-NEXT:    store i64 [[TMP21]], i64* [[TMP25]], align 8
396 // CHECK1-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS5]], i64 0, i64 0
397 // CHECK1-NEXT:    store i8* null, i8** [[TMP26]], align 8
398 // CHECK1-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
399 // CHECK1-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
400 // CHECK1-NEXT:    [[KERNEL_ARGS6:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
401 // CHECK1-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 0
402 // CHECK1-NEXT:    store i32 1, i32* [[TMP29]], align 4
403 // CHECK1-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 1
404 // CHECK1-NEXT:    store i32 1, i32* [[TMP30]], align 4
405 // CHECK1-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 2
406 // CHECK1-NEXT:    store i8** [[TMP27]], i8*** [[TMP31]], align 8
407 // CHECK1-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 3
408 // CHECK1-NEXT:    store i8** [[TMP28]], i8*** [[TMP32]], align 8
409 // CHECK1-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 4
410 // CHECK1-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.2, i32 0, i32 0), i64** [[TMP33]], align 8
411 // CHECK1-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 5
412 // CHECK1-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.3, i32 0, i32 0), i64** [[TMP34]], align 8
413 // CHECK1-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 6
414 // CHECK1-NEXT:    store i8** null, i8*** [[TMP35]], align 8
415 // CHECK1-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 7
416 // CHECK1-NEXT:    store i8** null, i8*** [[TMP36]], align 8
417 // CHECK1-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 8
418 // CHECK1-NEXT:    store i64 0, i64* [[TMP37]], align 8
419 // CHECK1-NEXT:    [[TMP38:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z27teams_argument_global_locali_l37.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]])
420 // CHECK1-NEXT:    [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
421 // CHECK1-NEXT:    br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED7:%.*]], label [[OMP_OFFLOAD_CONT8:%.*]]
422 // CHECK1:       omp_offload.failed7:
423 // CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z27teams_argument_global_locali_l37(i64 [[TMP21]]) #[[ATTR2]]
424 // CHECK1-NEXT:    br label [[OMP_OFFLOAD_CONT8]]
425 // CHECK1:       omp_offload.cont8:
426 // CHECK1-NEXT:    [[TMP40:%.*]] = load i32, i32* [[LA]], align 4
427 // CHECK1-NEXT:    [[CONV9:%.*]] = bitcast i64* [[LA_CASTED]] to i32*
428 // CHECK1-NEXT:    store i32 [[TMP40]], i32* [[CONV9]], align 4
429 // CHECK1-NEXT:    [[TMP41:%.*]] = load i64, i64* [[LA_CASTED]], align 8
430 // CHECK1-NEXT:    [[TMP42:%.*]] = load i32, i32* [[COMP]], align 4
431 // CHECK1-NEXT:    [[CONV11:%.*]] = bitcast i64* [[COMP_CASTED10]] to i32*
432 // CHECK1-NEXT:    store i32 [[TMP42]], i32* [[CONV11]], align 4
433 // CHECK1-NEXT:    [[TMP43:%.*]] = load i64, i64* [[COMP_CASTED10]], align 8
434 // CHECK1-NEXT:    [[TMP44:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS12]], i32 0, i32 0
435 // CHECK1-NEXT:    [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i64*
436 // CHECK1-NEXT:    store i64 [[TMP41]], i64* [[TMP45]], align 8
437 // CHECK1-NEXT:    [[TMP46:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS13]], i32 0, i32 0
438 // CHECK1-NEXT:    [[TMP47:%.*]] = bitcast i8** [[TMP46]] to i64*
439 // CHECK1-NEXT:    store i64 [[TMP41]], i64* [[TMP47]], align 8
440 // CHECK1-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS14]], i64 0, i64 0
441 // CHECK1-NEXT:    store i8* null, i8** [[TMP48]], align 8
442 // CHECK1-NEXT:    [[TMP49:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS12]], i32 0, i32 1
443 // CHECK1-NEXT:    [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i64*
444 // CHECK1-NEXT:    store i64 [[TMP43]], i64* [[TMP50]], align 8
445 // CHECK1-NEXT:    [[TMP51:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS13]], i32 0, i32 1
446 // CHECK1-NEXT:    [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i64*
447 // CHECK1-NEXT:    store i64 [[TMP43]], i64* [[TMP52]], align 8
448 // CHECK1-NEXT:    [[TMP53:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS14]], i64 0, i64 1
449 // CHECK1-NEXT:    store i8* null, i8** [[TMP53]], align 8
450 // CHECK1-NEXT:    [[TMP54:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS12]], i32 0, i32 0
451 // CHECK1-NEXT:    [[TMP55:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS13]], i32 0, i32 0
452 // CHECK1-NEXT:    [[TMP56:%.*]] = load i32, i32* [[LA]], align 4
453 // CHECK1-NEXT:    [[KERNEL_ARGS15:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
454 // CHECK1-NEXT:    [[TMP57:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS15]], i32 0, i32 0
455 // CHECK1-NEXT:    store i32 1, i32* [[TMP57]], align 4
456 // CHECK1-NEXT:    [[TMP58:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS15]], i32 0, i32 1
457 // CHECK1-NEXT:    store i32 2, i32* [[TMP58]], align 4
458 // CHECK1-NEXT:    [[TMP59:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS15]], i32 0, i32 2
459 // CHECK1-NEXT:    store i8** [[TMP54]], i8*** [[TMP59]], align 8
460 // CHECK1-NEXT:    [[TMP60:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS15]], i32 0, i32 3
461 // CHECK1-NEXT:    store i8** [[TMP55]], i8*** [[TMP60]], align 8
462 // CHECK1-NEXT:    [[TMP61:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS15]], i32 0, i32 4
463 // CHECK1-NEXT:    store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64** [[TMP61]], align 8
464 // CHECK1-NEXT:    [[TMP62:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS15]], i32 0, i32 5
465 // CHECK1-NEXT:    store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i64** [[TMP62]], align 8
466 // CHECK1-NEXT:    [[TMP63:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS15]], i32 0, i32 6
467 // CHECK1-NEXT:    store i8** null, i8*** [[TMP63]], align 8
468 // CHECK1-NEXT:    [[TMP64:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS15]], i32 0, i32 7
469 // CHECK1-NEXT:    store i8** null, i8*** [[TMP64]], align 8
470 // CHECK1-NEXT:    [[TMP65:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS15]], i32 0, i32 8
471 // CHECK1-NEXT:    store i64 0, i64* [[TMP65]], align 8
472 // CHECK1-NEXT:    [[TMP66:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 [[TMP56]], i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z27teams_argument_global_locali_l46.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS15]])
473 // CHECK1-NEXT:    [[TMP67:%.*]] = icmp ne i32 [[TMP66]], 0
474 // CHECK1-NEXT:    br i1 [[TMP67]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]]
475 // CHECK1:       omp_offload.failed16:
476 // CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z27teams_argument_global_locali_l46(i64 [[TMP41]], i64 [[TMP43]]) #[[ATTR2]]
477 // CHECK1-NEXT:    br label [[OMP_OFFLOAD_CONT17]]
478 // CHECK1:       omp_offload.cont17:
479 // CHECK1-NEXT:    [[TMP68:%.*]] = load i32, i32* [[LA]], align 4
480 // CHECK1-NEXT:    [[CONV19:%.*]] = bitcast i64* [[LA_CASTED18]] to i32*
481 // CHECK1-NEXT:    store i32 [[TMP68]], i32* [[CONV19]], align 4
482 // CHECK1-NEXT:    [[TMP69:%.*]] = load i64, i64* [[LA_CASTED18]], align 8
483 // CHECK1-NEXT:    [[TMP70:%.*]] = load i32, i32* [[COMP]], align 4
484 // CHECK1-NEXT:    [[CONV21:%.*]] = bitcast i64* [[COMP_CASTED20]] to i32*
485 // CHECK1-NEXT:    store i32 [[TMP70]], i32* [[CONV21]], align 4
486 // CHECK1-NEXT:    [[TMP71:%.*]] = load i64, i64* [[COMP_CASTED20]], align 8
487 // CHECK1-NEXT:    [[TMP72:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0
488 // CHECK1-NEXT:    [[TMP73:%.*]] = bitcast i8** [[TMP72]] to i64*
489 // CHECK1-NEXT:    store i64 [[TMP69]], i64* [[TMP73]], align 8
490 // CHECK1-NEXT:    [[TMP74:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0
491 // CHECK1-NEXT:    [[TMP75:%.*]] = bitcast i8** [[TMP74]] to i64*
492 // CHECK1-NEXT:    store i64 [[TMP69]], i64* [[TMP75]], align 8
493 // CHECK1-NEXT:    [[TMP76:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 0
494 // CHECK1-NEXT:    store i8* null, i8** [[TMP76]], align 8
495 // CHECK1-NEXT:    [[TMP77:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 1
496 // CHECK1-NEXT:    [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i64*
497 // CHECK1-NEXT:    store i64 [[TMP71]], i64* [[TMP78]], align 8
498 // CHECK1-NEXT:    [[TMP79:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 1
499 // CHECK1-NEXT:    [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i64*
500 // CHECK1-NEXT:    store i64 [[TMP71]], i64* [[TMP80]], align 8
501 // CHECK1-NEXT:    [[TMP81:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 1
502 // CHECK1-NEXT:    store i8* null, i8** [[TMP81]], align 8
503 // CHECK1-NEXT:    [[TMP82:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0
504 // CHECK1-NEXT:    [[TMP83:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0
505 // CHECK1-NEXT:    [[TMP84:%.*]] = load i32, i32* [[LA]], align 4
506 // CHECK1-NEXT:    [[KERNEL_ARGS25:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
507 // CHECK1-NEXT:    [[TMP85:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS25]], i32 0, i32 0
508 // CHECK1-NEXT:    store i32 1, i32* [[TMP85]], align 4
509 // CHECK1-NEXT:    [[TMP86:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS25]], i32 0, i32 1
510 // CHECK1-NEXT:    store i32 2, i32* [[TMP86]], align 4
511 // CHECK1-NEXT:    [[TMP87:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS25]], i32 0, i32 2
512 // CHECK1-NEXT:    store i8** [[TMP82]], i8*** [[TMP87]], align 8
513 // CHECK1-NEXT:    [[TMP88:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS25]], i32 0, i32 3
514 // CHECK1-NEXT:    store i8** [[TMP83]], i8*** [[TMP88]], align 8
515 // CHECK1-NEXT:    [[TMP89:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS25]], i32 0, i32 4
516 // CHECK1-NEXT:    store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.8, i32 0, i32 0), i64** [[TMP89]], align 8
517 // CHECK1-NEXT:    [[TMP90:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS25]], i32 0, i32 5
518 // CHECK1-NEXT:    store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.9, i32 0, i32 0), i64** [[TMP90]], align 8
519 // CHECK1-NEXT:    [[TMP91:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS25]], i32 0, i32 6
520 // CHECK1-NEXT:    store i8** null, i8*** [[TMP91]], align 8
521 // CHECK1-NEXT:    [[TMP92:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS25]], i32 0, i32 7
522 // CHECK1-NEXT:    store i8** null, i8*** [[TMP92]], align 8
523 // CHECK1-NEXT:    [[TMP93:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS25]], i32 0, i32 8
524 // CHECK1-NEXT:    store i64 0, i64* [[TMP93]], align 8
525 // CHECK1-NEXT:    [[TMP94:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 0, i32 [[TMP84]], i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z27teams_argument_global_locali_l53.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS25]])
526 // CHECK1-NEXT:    [[TMP95:%.*]] = icmp ne i32 [[TMP94]], 0
527 // CHECK1-NEXT:    br i1 [[TMP95]], label [[OMP_OFFLOAD_FAILED26:%.*]], label [[OMP_OFFLOAD_CONT27:%.*]]
528 // CHECK1:       omp_offload.failed26:
529 // CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z27teams_argument_global_locali_l53(i64 [[TMP69]], i64 [[TMP71]]) #[[ATTR2]]
530 // CHECK1-NEXT:    br label [[OMP_OFFLOAD_CONT27]]
531 // CHECK1:       omp_offload.cont27:
532 // CHECK1-NEXT:    [[TMP96:%.*]] = load i32, i32* @Gbla, align 4
533 // CHECK1-NEXT:    [[CONV28:%.*]] = bitcast i64* [[GBLA_CASTED]] to i32*
534 // CHECK1-NEXT:    store i32 [[TMP96]], i32* [[CONV28]], align 4
535 // CHECK1-NEXT:    [[TMP97:%.*]] = load i64, i64* [[GBLA_CASTED]], align 8
536 // CHECK1-NEXT:    [[TMP98:%.*]] = load i32, i32* [[A_ADDR]], align 4
537 // CHECK1-NEXT:    [[CONV29:%.*]] = bitcast i64* [[A_CASTED]] to i32*
538 // CHECK1-NEXT:    store i32 [[TMP98]], i32* [[CONV29]], align 4
539 // CHECK1-NEXT:    [[TMP99:%.*]] = load i64, i64* [[A_CASTED]], align 8
540 // CHECK1-NEXT:    [[TMP100:%.*]] = load i64, i64* @Gblb, align 8
541 // CHECK1-NEXT:    store i64 [[TMP100]], i64* [[GBLB_CASTED]], align 8
542 // CHECK1-NEXT:    [[TMP101:%.*]] = load i64, i64* [[GBLB_CASTED]], align 8
543 // CHECK1-NEXT:    [[TMP102:%.*]] = load float, float* [[LC]], align 4
544 // CHECK1-NEXT:    [[CONV30:%.*]] = bitcast i64* [[LC_CASTED]] to float*
545 // CHECK1-NEXT:    store float [[TMP102]], float* [[CONV30]], align 4
546 // CHECK1-NEXT:    [[TMP103:%.*]] = load i64, i64* [[LC_CASTED]], align 8
547 // CHECK1-NEXT:    [[TMP104:%.*]] = load i32, i32* [[COMP]], align 4
548 // CHECK1-NEXT:    [[CONV32:%.*]] = bitcast i64* [[COMP_CASTED31]] to i32*
549 // CHECK1-NEXT:    store i32 [[TMP104]], i32* [[CONV32]], align 4
550 // CHECK1-NEXT:    [[TMP105:%.*]] = load i64, i64* [[COMP_CASTED31]], align 8
551 // CHECK1-NEXT:    [[TMP106:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0
552 // CHECK1-NEXT:    [[TMP107:%.*]] = bitcast i8** [[TMP106]] to i64*
553 // CHECK1-NEXT:    store i64 [[TMP97]], i64* [[TMP107]], align 8
554 // CHECK1-NEXT:    [[TMP108:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0
555 // CHECK1-NEXT:    [[TMP109:%.*]] = bitcast i8** [[TMP108]] to i64*
556 // CHECK1-NEXT:    store i64 [[TMP97]], i64* [[TMP109]], align 8
557 // CHECK1-NEXT:    [[TMP110:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i64 0, i64 0
558 // CHECK1-NEXT:    store i8* null, i8** [[TMP110]], align 8
559 // CHECK1-NEXT:    [[TMP111:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 1
560 // CHECK1-NEXT:    [[TMP112:%.*]] = bitcast i8** [[TMP111]] to i64*
561 // CHECK1-NEXT:    store i64 [[TMP99]], i64* [[TMP112]], align 8
562 // CHECK1-NEXT:    [[TMP113:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 1
563 // CHECK1-NEXT:    [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i64*
564 // CHECK1-NEXT:    store i64 [[TMP99]], i64* [[TMP114]], align 8
565 // CHECK1-NEXT:    [[TMP115:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i64 0, i64 1
566 // CHECK1-NEXT:    store i8* null, i8** [[TMP115]], align 8
567 // CHECK1-NEXT:    [[TMP116:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 2
568 // CHECK1-NEXT:    [[TMP117:%.*]] = bitcast i8** [[TMP116]] to i64*
569 // CHECK1-NEXT:    store i64 [[TMP101]], i64* [[TMP117]], align 8
570 // CHECK1-NEXT:    [[TMP118:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 2
571 // CHECK1-NEXT:    [[TMP119:%.*]] = bitcast i8** [[TMP118]] to i64*
572 // CHECK1-NEXT:    store i64 [[TMP101]], i64* [[TMP119]], align 8
573 // CHECK1-NEXT:    [[TMP120:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i64 0, i64 2
574 // CHECK1-NEXT:    store i8* null, i8** [[TMP120]], align 8
575 // CHECK1-NEXT:    [[TMP121:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 3
576 // CHECK1-NEXT:    [[TMP122:%.*]] = bitcast i8** [[TMP121]] to i64*
577 // CHECK1-NEXT:    store i64 [[TMP103]], i64* [[TMP122]], align 8
578 // CHECK1-NEXT:    [[TMP123:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 3
579 // CHECK1-NEXT:    [[TMP124:%.*]] = bitcast i8** [[TMP123]] to i64*
580 // CHECK1-NEXT:    store i64 [[TMP103]], i64* [[TMP124]], align 8
581 // CHECK1-NEXT:    [[TMP125:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i64 0, i64 3
582 // CHECK1-NEXT:    store i8* null, i8** [[TMP125]], align 8
583 // CHECK1-NEXT:    [[TMP126:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 4
584 // CHECK1-NEXT:    [[TMP127:%.*]] = bitcast i8** [[TMP126]] to i64*
585 // CHECK1-NEXT:    store i64 [[TMP105]], i64* [[TMP127]], align 8
586 // CHECK1-NEXT:    [[TMP128:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 4
587 // CHECK1-NEXT:    [[TMP129:%.*]] = bitcast i8** [[TMP128]] to i64*
588 // CHECK1-NEXT:    store i64 [[TMP105]], i64* [[TMP129]], align 8
589 // CHECK1-NEXT:    [[TMP130:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i64 0, i64 4
590 // CHECK1-NEXT:    store i8* null, i8** [[TMP130]], align 8
591 // CHECK1-NEXT:    [[TMP131:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0
592 // CHECK1-NEXT:    [[TMP132:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0
593 // CHECK1-NEXT:    [[TMP133:%.*]] = load i32, i32* @Gbla, align 4
594 // CHECK1-NEXT:    [[TMP134:%.*]] = load i32, i32* [[A_ADDR]], align 4
595 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP133]], [[TMP134]]
596 // CHECK1-NEXT:    [[TMP135:%.*]] = load i64, i64* @Gblb, align 8
597 // CHECK1-NEXT:    [[TMP136:%.*]] = load float, float* [[LC]], align 4
598 // CHECK1-NEXT:    [[CONV36:%.*]] = fptosi float [[TMP136]] to i64
599 // CHECK1-NEXT:    [[ADD37:%.*]] = add nsw i64 [[TMP135]], [[CONV36]]
600 // CHECK1-NEXT:    [[TMP137:%.*]] = trunc i64 [[ADD37]] to i32
601 // CHECK1-NEXT:    [[KERNEL_ARGS38:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
602 // CHECK1-NEXT:    [[TMP138:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS38]], i32 0, i32 0
603 // CHECK1-NEXT:    store i32 1, i32* [[TMP138]], align 4
604 // CHECK1-NEXT:    [[TMP139:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS38]], i32 0, i32 1
605 // CHECK1-NEXT:    store i32 5, i32* [[TMP139]], align 4
606 // CHECK1-NEXT:    [[TMP140:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS38]], i32 0, i32 2
607 // CHECK1-NEXT:    store i8** [[TMP131]], i8*** [[TMP140]], align 8
608 // CHECK1-NEXT:    [[TMP141:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS38]], i32 0, i32 3
609 // CHECK1-NEXT:    store i8** [[TMP132]], i8*** [[TMP141]], align 8
610 // CHECK1-NEXT:    [[TMP142:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS38]], i32 0, i32 4
611 // CHECK1-NEXT:    store i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.11, i32 0, i32 0), i64** [[TMP142]], align 8
612 // CHECK1-NEXT:    [[TMP143:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS38]], i32 0, i32 5
613 // CHECK1-NEXT:    store i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.12, i32 0, i32 0), i64** [[TMP143]], align 8
614 // CHECK1-NEXT:    [[TMP144:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS38]], i32 0, i32 6
615 // CHECK1-NEXT:    store i8** null, i8*** [[TMP144]], align 8
616 // CHECK1-NEXT:    [[TMP145:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS38]], i32 0, i32 7
617 // CHECK1-NEXT:    store i8** null, i8*** [[TMP145]], align 8
618 // CHECK1-NEXT:    [[TMP146:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS38]], i32 0, i32 8
619 // CHECK1-NEXT:    store i64 0, i64* [[TMP146]], align 8
620 // CHECK1-NEXT:    [[TMP147:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 [[ADD]], i32 [[TMP137]], i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z27teams_argument_global_locali_l62.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS38]])
621 // CHECK1-NEXT:    [[TMP148:%.*]] = icmp ne i32 [[TMP147]], 0
622 // CHECK1-NEXT:    br i1 [[TMP148]], label [[OMP_OFFLOAD_FAILED39:%.*]], label [[OMP_OFFLOAD_CONT40:%.*]]
623 // CHECK1:       omp_offload.failed39:
624 // CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z27teams_argument_global_locali_l62(i64 [[TMP97]], i64 [[TMP99]], i64 [[TMP101]], i64 [[TMP103]], i64 [[TMP105]]) #[[ATTR2]]
625 // CHECK1-NEXT:    br label [[OMP_OFFLOAD_CONT40]]
626 // CHECK1:       omp_offload.cont40:
627 // CHECK1-NEXT:    [[TMP149:%.*]] = load i32*, i32** @Gblc, align 8
628 // CHECK1-NEXT:    store i32* [[TMP149]], i32** [[TMP]], align 8
629 // CHECK1-NEXT:    [[TMP150:%.*]] = load i32, i32* @Gbla, align 4
630 // CHECK1-NEXT:    [[CONV41:%.*]] = bitcast i64* [[GBLC_CASTED]] to i32*
631 // CHECK1-NEXT:    store i32 [[TMP150]], i32* [[CONV41]], align 4
632 // CHECK1-NEXT:    [[TMP151:%.*]] = load i64, i64* [[GBLC_CASTED]], align 8
633 // CHECK1-NEXT:    [[TMP152:%.*]] = load i32, i32* [[COMP]], align 4
634 // CHECK1-NEXT:    [[CONV43:%.*]] = bitcast i64* [[COMP_CASTED42]] to i32*
635 // CHECK1-NEXT:    store i32 [[TMP152]], i32* [[CONV43]], align 4
636 // CHECK1-NEXT:    [[TMP153:%.*]] = load i64, i64* [[COMP_CASTED42]], align 8
637 // CHECK1-NEXT:    [[TMP154:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 0
638 // CHECK1-NEXT:    [[TMP155:%.*]] = bitcast i8** [[TMP154]] to i64*
639 // CHECK1-NEXT:    store i64 [[TMP151]], i64* [[TMP155]], align 8
640 // CHECK1-NEXT:    [[TMP156:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 0
641 // CHECK1-NEXT:    [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i64*
642 // CHECK1-NEXT:    store i64 [[TMP151]], i64* [[TMP157]], align 8
643 // CHECK1-NEXT:    [[TMP158:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i64 0, i64 0
644 // CHECK1-NEXT:    store i8* null, i8** [[TMP158]], align 8
645 // CHECK1-NEXT:    [[TMP159:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 1
646 // CHECK1-NEXT:    [[TMP160:%.*]] = bitcast i8** [[TMP159]] to i64*
647 // CHECK1-NEXT:    store i64 [[TMP153]], i64* [[TMP160]], align 8
648 // CHECK1-NEXT:    [[TMP161:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 1
649 // CHECK1-NEXT:    [[TMP162:%.*]] = bitcast i8** [[TMP161]] to i64*
650 // CHECK1-NEXT:    store i64 [[TMP153]], i64* [[TMP162]], align 8
651 // CHECK1-NEXT:    [[TMP163:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i64 0, i64 1
652 // CHECK1-NEXT:    store i8* null, i8** [[TMP163]], align 8
653 // CHECK1-NEXT:    [[TMP164:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 0
654 // CHECK1-NEXT:    [[TMP165:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 0
655 // CHECK1-NEXT:    [[TMP166:%.*]] = load i32*, i32** @Gblc, align 8
656 // CHECK1-NEXT:    store i32* [[TMP166]], i32** [[_TMP47]], align 8
657 // CHECK1-NEXT:    [[TMP167:%.*]] = load i32, i32* @Gbla, align 4
658 // CHECK1-NEXT:    [[ADD48:%.*]] = add nsw i32 [[TMP167]], 1
659 // CHECK1-NEXT:    [[TMP168:%.*]] = load i32*, i32** @Gblc, align 8
660 // CHECK1-NEXT:    store i32* [[TMP168]], i32** [[_TMP49]], align 8
661 // CHECK1-NEXT:    [[TMP169:%.*]] = load i32, i32* @Gbla, align 4
662 // CHECK1-NEXT:    [[ADD50:%.*]] = add nsw i32 [[TMP169]], 2
663 // CHECK1-NEXT:    [[KERNEL_ARGS51:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
664 // CHECK1-NEXT:    [[TMP170:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS51]], i32 0, i32 0
665 // CHECK1-NEXT:    store i32 1, i32* [[TMP170]], align 4
666 // CHECK1-NEXT:    [[TMP171:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS51]], i32 0, i32 1
667 // CHECK1-NEXT:    store i32 2, i32* [[TMP171]], align 4
668 // CHECK1-NEXT:    [[TMP172:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS51]], i32 0, i32 2
669 // CHECK1-NEXT:    store i8** [[TMP164]], i8*** [[TMP172]], align 8
670 // CHECK1-NEXT:    [[TMP173:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS51]], i32 0, i32 3
671 // CHECK1-NEXT:    store i8** [[TMP165]], i8*** [[TMP173]], align 8
672 // CHECK1-NEXT:    [[TMP174:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS51]], i32 0, i32 4
673 // CHECK1-NEXT:    store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.14, i32 0, i32 0), i64** [[TMP174]], align 8
674 // CHECK1-NEXT:    [[TMP175:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS51]], i32 0, i32 5
675 // CHECK1-NEXT:    store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.15, i32 0, i32 0), i64** [[TMP175]], align 8
676 // CHECK1-NEXT:    [[TMP176:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS51]], i32 0, i32 6
677 // CHECK1-NEXT:    store i8** null, i8*** [[TMP176]], align 8
678 // CHECK1-NEXT:    [[TMP177:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS51]], i32 0, i32 7
679 // CHECK1-NEXT:    store i8** null, i8*** [[TMP177]], align 8
680 // CHECK1-NEXT:    [[TMP178:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS51]], i32 0, i32 8
681 // CHECK1-NEXT:    store i64 0, i64* [[TMP178]], align 8
682 // CHECK1-NEXT:    [[TMP179:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 [[ADD48]], i32 [[ADD50]], i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z27teams_argument_global_locali_l71.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS51]])
683 // CHECK1-NEXT:    [[TMP180:%.*]] = icmp ne i32 [[TMP179]], 0
684 // CHECK1-NEXT:    br i1 [[TMP180]], label [[OMP_OFFLOAD_FAILED52:%.*]], label [[OMP_OFFLOAD_CONT53:%.*]]
685 // CHECK1:       omp_offload.failed52:
686 // CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z27teams_argument_global_locali_l71(i64 [[TMP151]], i64 [[TMP153]]) #[[ATTR2]]
687 // CHECK1-NEXT:    br label [[OMP_OFFLOAD_CONT53]]
688 // CHECK1:       omp_offload.cont53:
689 // CHECK1-NEXT:    [[TMP181:%.*]] = load i32, i32* [[COMP]], align 4
690 // CHECK1-NEXT:    ret i32 [[TMP181]]
691 //
692 //
693 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z27teams_argument_global_locali_l31
694 // CHECK1-SAME: (i64 noundef [[COMP:%.*]]) #[[ATTR1:[0-9]+]] {
695 // CHECK1-NEXT:  entry:
696 // CHECK1-NEXT:    [[COMP_ADDR:%.*]] = alloca i64, align 8
697 // CHECK1-NEXT:    store i64 [[COMP]], i64* [[COMP_ADDR]], align 8
698 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[COMP_ADDR]] to i32*
699 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[CONV]])
700 // CHECK1-NEXT:    ret void
701 //
702 //
703 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
704 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[COMP:%.*]]) #[[ATTR1]] {
705 // CHECK1-NEXT:  entry:
706 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
707 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
708 // CHECK1-NEXT:    [[COMP_ADDR:%.*]] = alloca i32*, align 8
709 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
710 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
711 // CHECK1-NEXT:    store i32* [[COMP]], i32** [[COMP_ADDR]], align 8
712 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[COMP_ADDR]], align 8
713 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
714 // CHECK1-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP1]], 1
715 // CHECK1-NEXT:    store i32 [[INC]], i32* [[TMP0]], align 4
716 // CHECK1-NEXT:    ret void
717 //
718 //
719 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z27teams_argument_global_locali_l37
720 // CHECK1-SAME: (i64 noundef [[COMP:%.*]]) #[[ATTR1]] {
721 // CHECK1-NEXT:  entry:
722 // CHECK1-NEXT:    [[COMP_ADDR:%.*]] = alloca i64, align 8
723 // CHECK1-NEXT:    store i64 [[COMP]], i64* [[COMP_ADDR]], align 8
724 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[COMP_ADDR]] to i32*
725 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32* [[CONV]])
726 // CHECK1-NEXT:    ret void
727 //
728 //
729 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1
730 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[COMP:%.*]]) #[[ATTR1]] {
731 // CHECK1-NEXT:  entry:
732 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
733 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
734 // CHECK1-NEXT:    [[COMP_ADDR:%.*]] = alloca i32*, align 8
735 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
736 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
737 // CHECK1-NEXT:    store i32* [[COMP]], i32** [[COMP_ADDR]], align 8
738 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[COMP_ADDR]], align 8
739 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
740 // CHECK1-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP1]], 1
741 // CHECK1-NEXT:    store i32 [[INC]], i32* [[TMP0]], align 4
742 // CHECK1-NEXT:    ret void
743 //
744 //
745 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z27teams_argument_global_locali_l46
746 // CHECK1-SAME: (i64 noundef [[LA:%.*]], i64 noundef [[COMP:%.*]]) #[[ATTR1]] {
747 // CHECK1-NEXT:  entry:
748 // CHECK1-NEXT:    [[LA_ADDR:%.*]] = alloca i64, align 8
749 // CHECK1-NEXT:    [[COMP_ADDR:%.*]] = alloca i64, align 8
750 // CHECK1-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
751 // CHECK1-NEXT:    store i64 [[LA]], i64* [[LA_ADDR]], align 8
752 // CHECK1-NEXT:    store i64 [[COMP]], i64* [[COMP_ADDR]], align 8
753 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[LA_ADDR]] to i32*
754 // CHECK1-NEXT:    [[CONV1:%.*]] = bitcast i64* [[COMP_ADDR]] to i32*
755 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
756 // CHECK1-NEXT:    call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]], i32 0)
757 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32* [[CONV1]])
758 // CHECK1-NEXT:    ret void
759 //
760 //
761 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..4
762 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[COMP:%.*]]) #[[ATTR1]] {
763 // CHECK1-NEXT:  entry:
764 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
765 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
766 // CHECK1-NEXT:    [[COMP_ADDR:%.*]] = alloca i32*, align 8
767 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
768 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
769 // CHECK1-NEXT:    store i32* [[COMP]], i32** [[COMP_ADDR]], align 8
770 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[COMP_ADDR]], align 8
771 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
772 // CHECK1-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP1]], 1
773 // CHECK1-NEXT:    store i32 [[INC]], i32* [[TMP0]], align 4
774 // CHECK1-NEXT:    ret void
775 //
776 //
777 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z27teams_argument_global_locali_l53
778 // CHECK1-SAME: (i64 noundef [[LA:%.*]], i64 noundef [[COMP:%.*]]) #[[ATTR1]] {
779 // CHECK1-NEXT:  entry:
780 // CHECK1-NEXT:    [[LA_ADDR:%.*]] = alloca i64, align 8
781 // CHECK1-NEXT:    [[COMP_ADDR:%.*]] = alloca i64, align 8
782 // CHECK1-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
783 // CHECK1-NEXT:    store i64 [[LA]], i64* [[LA_ADDR]], align 8
784 // CHECK1-NEXT:    store i64 [[COMP]], i64* [[COMP_ADDR]], align 8
785 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[LA_ADDR]] to i32*
786 // CHECK1-NEXT:    [[CONV1:%.*]] = bitcast i64* [[COMP_ADDR]] to i32*
787 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
788 // CHECK1-NEXT:    call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 [[TMP1]])
789 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32* [[CONV1]])
790 // CHECK1-NEXT:    ret void
791 //
792 //
793 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..7
794 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[COMP:%.*]]) #[[ATTR1]] {
795 // CHECK1-NEXT:  entry:
796 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
797 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
798 // CHECK1-NEXT:    [[COMP_ADDR:%.*]] = alloca i32*, align 8
799 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
800 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
801 // CHECK1-NEXT:    store i32* [[COMP]], i32** [[COMP_ADDR]], align 8
802 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[COMP_ADDR]], align 8
803 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
804 // CHECK1-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP1]], 1
805 // CHECK1-NEXT:    store i32 [[INC]], i32* [[TMP0]], align 4
806 // CHECK1-NEXT:    ret void
807 //
808 //
809 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z27teams_argument_global_locali_l62
810 // CHECK1-SAME: (i64 noundef [[GBLA:%.*]], i64 noundef [[A:%.*]], i64 noundef [[GBLB:%.*]], i64 noundef [[LC:%.*]], i64 noundef [[COMP:%.*]]) #[[ATTR1]] {
811 // CHECK1-NEXT:  entry:
812 // CHECK1-NEXT:    [[GBLA_ADDR:%.*]] = alloca i64, align 8
813 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
814 // CHECK1-NEXT:    [[GBLB_ADDR:%.*]] = alloca i64, align 8
815 // CHECK1-NEXT:    [[LC_ADDR:%.*]] = alloca i64, align 8
816 // CHECK1-NEXT:    [[COMP_ADDR:%.*]] = alloca i64, align 8
817 // CHECK1-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
818 // CHECK1-NEXT:    store i64 [[GBLA]], i64* [[GBLA_ADDR]], align 8
819 // CHECK1-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
820 // CHECK1-NEXT:    store i64 [[GBLB]], i64* [[GBLB_ADDR]], align 8
821 // CHECK1-NEXT:    store i64 [[LC]], i64* [[LC_ADDR]], align 8
822 // CHECK1-NEXT:    store i64 [[COMP]], i64* [[COMP_ADDR]], align 8
823 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[GBLA_ADDR]] to i32*
824 // CHECK1-NEXT:    [[CONV1:%.*]] = bitcast i64* [[A_ADDR]] to i32*
825 // CHECK1-NEXT:    [[CONV2:%.*]] = bitcast i64* [[LC_ADDR]] to float*
826 // CHECK1-NEXT:    [[CONV3:%.*]] = bitcast i64* [[COMP_ADDR]] to i32*
827 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
828 // CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV1]], align 4
829 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[TMP2]]
830 // CHECK1-NEXT:    [[TMP3:%.*]] = load i64, i64* [[GBLB_ADDR]], align 8
831 // CHECK1-NEXT:    [[TMP4:%.*]] = load float, float* [[CONV2]], align 4
832 // CHECK1-NEXT:    [[CONV4:%.*]] = fptosi float [[TMP4]] to i64
833 // CHECK1-NEXT:    [[ADD5:%.*]] = add nsw i64 [[TMP3]], [[CONV4]]
834 // CHECK1-NEXT:    [[TMP5:%.*]] = trunc i64 [[ADD5]] to i32
835 // CHECK1-NEXT:    call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[ADD]], i32 [[TMP5]])
836 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32* [[CONV3]])
837 // CHECK1-NEXT:    ret void
838 //
839 //
840 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..10
841 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[COMP:%.*]]) #[[ATTR1]] {
842 // CHECK1-NEXT:  entry:
843 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
844 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
845 // CHECK1-NEXT:    [[COMP_ADDR:%.*]] = alloca i32*, align 8
846 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
847 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
848 // CHECK1-NEXT:    store i32* [[COMP]], i32** [[COMP_ADDR]], align 8
849 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[COMP_ADDR]], align 8
850 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
851 // CHECK1-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP1]], 1
852 // CHECK1-NEXT:    store i32 [[INC]], i32* [[TMP0]], align 4
853 // CHECK1-NEXT:    ret void
854 //
855 //
856 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z27teams_argument_global_locali_l71
857 // CHECK1-SAME: (i64 noundef [[GBLC:%.*]], i64 noundef [[COMP:%.*]]) #[[ATTR1]] {
858 // CHECK1-NEXT:  entry:
859 // CHECK1-NEXT:    [[GBLC_ADDR:%.*]] = alloca i64, align 8
860 // CHECK1-NEXT:    [[COMP_ADDR:%.*]] = alloca i64, align 8
861 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32*, align 8
862 // CHECK1-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
863 // CHECK1-NEXT:    store i64 [[GBLC]], i64* [[GBLC_ADDR]], align 8
864 // CHECK1-NEXT:    store i64 [[COMP]], i64* [[COMP_ADDR]], align 8
865 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[GBLC_ADDR]] to i32*
866 // CHECK1-NEXT:    [[CONV1:%.*]] = bitcast i64* [[COMP_ADDR]] to i32*
867 // CHECK1-NEXT:    store i32* [[CONV]], i32** [[TMP]], align 8
868 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* @Gbla, align 4
869 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], 1
870 // CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* @Gbla, align 4
871 // CHECK1-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP2]], 2
872 // CHECK1-NEXT:    call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[ADD]], i32 [[ADD2]])
873 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[TMP]], align 8
874 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i32* [[CONV1]], i32* [[TMP3]])
875 // CHECK1-NEXT:    ret void
876 //
877 //
878 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..13
879 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[COMP:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[GBLC:%.*]]) #[[ATTR1]] {
880 // CHECK1-NEXT:  entry:
881 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
882 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
883 // CHECK1-NEXT:    [[COMP_ADDR:%.*]] = alloca i32*, align 8
884 // CHECK1-NEXT:    [[GBLC_ADDR:%.*]] = alloca i32*, align 8
885 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32*, align 8
886 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
887 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
888 // CHECK1-NEXT:    store i32* [[COMP]], i32** [[COMP_ADDR]], align 8
889 // CHECK1-NEXT:    store i32* [[GBLC]], i32** [[GBLC_ADDR]], align 8
890 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[COMP_ADDR]], align 8
891 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[GBLC_ADDR]], align 8
892 // CHECK1-NEXT:    store i32* [[TMP1]], i32** [[TMP]], align 8
893 // CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* @Gbla, align 4
894 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4
895 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP3]], [[TMP2]]
896 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[TMP0]], align 4
897 // CHECK1-NEXT:    ret void
898 //
899 //
900 // CHECK1-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
901 // CHECK1-SAME: () #[[ATTR3:[0-9]+]] {
902 // CHECK1-NEXT:  entry:
903 // CHECK1-NEXT:    call void @__tgt_register_requires(i64 1)
904 // CHECK1-NEXT:    ret void
905 //
906 //
907 // CHECK3-LABEL: define {{[^@]+}}@_Z27teams_argument_global_locali
908 // CHECK3-SAME: (i32 noundef [[A:%.*]]) #[[ATTR0:[0-9]+]] {
909 // CHECK3-NEXT:  entry:
910 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
911 // CHECK3-NEXT:    [[COMP:%.*]] = alloca i32, align 4
912 // CHECK3-NEXT:    [[LA:%.*]] = alloca i32, align 4
913 // CHECK3-NEXT:    [[LC:%.*]] = alloca float, align 4
914 // CHECK3-NEXT:    [[COMP_CASTED:%.*]] = alloca i32, align 4
915 // CHECK3-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4
916 // CHECK3-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4
917 // CHECK3-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4
918 // CHECK3-NEXT:    [[COMP_CASTED1:%.*]] = alloca i32, align 4
919 // CHECK3-NEXT:    [[DOTOFFLOAD_BASEPTRS2:%.*]] = alloca [1 x i8*], align 4
920 // CHECK3-NEXT:    [[DOTOFFLOAD_PTRS3:%.*]] = alloca [1 x i8*], align 4
921 // CHECK3-NEXT:    [[DOTOFFLOAD_MAPPERS4:%.*]] = alloca [1 x i8*], align 4
922 // CHECK3-NEXT:    [[LA_CASTED:%.*]] = alloca i32, align 4
923 // CHECK3-NEXT:    [[COMP_CASTED8:%.*]] = alloca i32, align 4
924 // CHECK3-NEXT:    [[DOTOFFLOAD_BASEPTRS9:%.*]] = alloca [2 x i8*], align 4
925 // CHECK3-NEXT:    [[DOTOFFLOAD_PTRS10:%.*]] = alloca [2 x i8*], align 4
926 // CHECK3-NEXT:    [[DOTOFFLOAD_MAPPERS11:%.*]] = alloca [2 x i8*], align 4
927 // CHECK3-NEXT:    [[LA_CASTED15:%.*]] = alloca i32, align 4
928 // CHECK3-NEXT:    [[COMP_CASTED16:%.*]] = alloca i32, align 4
929 // CHECK3-NEXT:    [[DOTOFFLOAD_BASEPTRS17:%.*]] = alloca [2 x i8*], align 4
930 // CHECK3-NEXT:    [[DOTOFFLOAD_PTRS18:%.*]] = alloca [2 x i8*], align 4
931 // CHECK3-NEXT:    [[DOTOFFLOAD_MAPPERS19:%.*]] = alloca [2 x i8*], align 4
932 // CHECK3-NEXT:    [[GBLA_CASTED:%.*]] = alloca i32, align 4
933 // CHECK3-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
934 // CHECK3-NEXT:    [[LC_CASTED:%.*]] = alloca i32, align 4
935 // CHECK3-NEXT:    [[COMP_CASTED23:%.*]] = alloca i32, align 4
936 // CHECK3-NEXT:    [[DOTOFFLOAD_BASEPTRS24:%.*]] = alloca [5 x i8*], align 4
937 // CHECK3-NEXT:    [[DOTOFFLOAD_PTRS25:%.*]] = alloca [5 x i8*], align 4
938 // CHECK3-NEXT:    [[DOTOFFLOAD_MAPPERS26:%.*]] = alloca [5 x i8*], align 4
939 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32*, align 4
940 // CHECK3-NEXT:    [[GBLC_CASTED:%.*]] = alloca i32, align 4
941 // CHECK3-NEXT:    [[COMP_CASTED32:%.*]] = alloca i32, align 4
942 // CHECK3-NEXT:    [[DOTOFFLOAD_BASEPTRS33:%.*]] = alloca [2 x i8*], align 4
943 // CHECK3-NEXT:    [[DOTOFFLOAD_PTRS34:%.*]] = alloca [2 x i8*], align 4
944 // CHECK3-NEXT:    [[DOTOFFLOAD_MAPPERS35:%.*]] = alloca [2 x i8*], align 4
945 // CHECK3-NEXT:    [[_TMP36:%.*]] = alloca i32*, align 4
946 // CHECK3-NEXT:    [[_TMP38:%.*]] = alloca i32*, align 4
947 // CHECK3-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
948 // CHECK3-NEXT:    store i32 1, i32* [[COMP]], align 4
949 // CHECK3-NEXT:    store i32 23, i32* [[LA]], align 4
950 // CHECK3-NEXT:    store float 2.500000e+01, float* [[LC]], align 4
951 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32, i32* [[COMP]], align 4
952 // CHECK3-NEXT:    store i32 [[TMP0]], i32* [[COMP_CASTED]], align 4
953 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[COMP_CASTED]], align 4
954 // CHECK3-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
955 // CHECK3-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32*
956 // CHECK3-NEXT:    store i32 [[TMP1]], i32* [[TMP3]], align 4
957 // CHECK3-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
958 // CHECK3-NEXT:    [[TMP5:%.*]] = bitcast i8** [[TMP4]] to i32*
959 // CHECK3-NEXT:    store i32 [[TMP1]], i32* [[TMP5]], align 4
960 // CHECK3-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
961 // CHECK3-NEXT:    store i8* null, i8** [[TMP6]], align 4
962 // CHECK3-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
963 // CHECK3-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
964 // CHECK3-NEXT:    [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
965 // CHECK3-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
966 // CHECK3-NEXT:    store i32 1, i32* [[TMP9]], align 4
967 // CHECK3-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
968 // CHECK3-NEXT:    store i32 1, i32* [[TMP10]], align 4
969 // CHECK3-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
970 // CHECK3-NEXT:    store i8** [[TMP7]], i8*** [[TMP11]], align 4
971 // CHECK3-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
972 // CHECK3-NEXT:    store i8** [[TMP8]], i8*** [[TMP12]], align 4
973 // CHECK3-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
974 // CHECK3-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64** [[TMP13]], align 4
975 // CHECK3-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
976 // CHECK3-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i64** [[TMP14]], align 4
977 // CHECK3-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
978 // CHECK3-NEXT:    store i8** null, i8*** [[TMP15]], align 4
979 // CHECK3-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
980 // CHECK3-NEXT:    store i8** null, i8*** [[TMP16]], align 4
981 // CHECK3-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
982 // CHECK3-NEXT:    store i64 0, i64* [[TMP17]], align 8
983 // CHECK3-NEXT:    [[TMP18:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z27teams_argument_global_locali_l31.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
984 // CHECK3-NEXT:    [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
985 // CHECK3-NEXT:    br i1 [[TMP19]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
986 // CHECK3:       omp_offload.failed:
987 // CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z27teams_argument_global_locali_l31(i32 [[TMP1]]) #[[ATTR2:[0-9]+]]
988 // CHECK3-NEXT:    br label [[OMP_OFFLOAD_CONT]]
989 // CHECK3:       omp_offload.cont:
990 // CHECK3-NEXT:    [[TMP20:%.*]] = load i32, i32* [[COMP]], align 4
991 // CHECK3-NEXT:    store i32 [[TMP20]], i32* [[COMP_CASTED1]], align 4
992 // CHECK3-NEXT:    [[TMP21:%.*]] = load i32, i32* [[COMP_CASTED1]], align 4
993 // CHECK3-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS2]], i32 0, i32 0
994 // CHECK3-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32*
995 // CHECK3-NEXT:    store i32 [[TMP21]], i32* [[TMP23]], align 4
996 // CHECK3-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS3]], i32 0, i32 0
997 // CHECK3-NEXT:    [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32*
998 // CHECK3-NEXT:    store i32 [[TMP21]], i32* [[TMP25]], align 4
999 // CHECK3-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS4]], i32 0, i32 0
1000 // CHECK3-NEXT:    store i8* null, i8** [[TMP26]], align 4
1001 // CHECK3-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS2]], i32 0, i32 0
1002 // CHECK3-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS3]], i32 0, i32 0
1003 // CHECK3-NEXT:    [[KERNEL_ARGS5:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
1004 // CHECK3-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS5]], i32 0, i32 0
1005 // CHECK3-NEXT:    store i32 1, i32* [[TMP29]], align 4
1006 // CHECK3-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS5]], i32 0, i32 1
1007 // CHECK3-NEXT:    store i32 1, i32* [[TMP30]], align 4
1008 // CHECK3-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS5]], i32 0, i32 2
1009 // CHECK3-NEXT:    store i8** [[TMP27]], i8*** [[TMP31]], align 4
1010 // CHECK3-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS5]], i32 0, i32 3
1011 // CHECK3-NEXT:    store i8** [[TMP28]], i8*** [[TMP32]], align 4
1012 // CHECK3-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS5]], i32 0, i32 4
1013 // CHECK3-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.2, i32 0, i32 0), i64** [[TMP33]], align 4
1014 // CHECK3-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS5]], i32 0, i32 5
1015 // CHECK3-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.3, i32 0, i32 0), i64** [[TMP34]], align 4
1016 // CHECK3-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS5]], i32 0, i32 6
1017 // CHECK3-NEXT:    store i8** null, i8*** [[TMP35]], align 4
1018 // CHECK3-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS5]], i32 0, i32 7
1019 // CHECK3-NEXT:    store i8** null, i8*** [[TMP36]], align 4
1020 // CHECK3-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS5]], i32 0, i32 8
1021 // CHECK3-NEXT:    store i64 0, i64* [[TMP37]], align 8
1022 // CHECK3-NEXT:    [[TMP38:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z27teams_argument_global_locali_l37.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS5]])
1023 // CHECK3-NEXT:    [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
1024 // CHECK3-NEXT:    br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED6:%.*]], label [[OMP_OFFLOAD_CONT7:%.*]]
1025 // CHECK3:       omp_offload.failed6:
1026 // CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z27teams_argument_global_locali_l37(i32 [[TMP21]]) #[[ATTR2]]
1027 // CHECK3-NEXT:    br label [[OMP_OFFLOAD_CONT7]]
1028 // CHECK3:       omp_offload.cont7:
1029 // CHECK3-NEXT:    [[TMP40:%.*]] = load i32, i32* [[LA]], align 4
1030 // CHECK3-NEXT:    store i32 [[TMP40]], i32* [[LA_CASTED]], align 4
1031 // CHECK3-NEXT:    [[TMP41:%.*]] = load i32, i32* [[LA_CASTED]], align 4
1032 // CHECK3-NEXT:    [[TMP42:%.*]] = load i32, i32* [[COMP]], align 4
1033 // CHECK3-NEXT:    store i32 [[TMP42]], i32* [[COMP_CASTED8]], align 4
1034 // CHECK3-NEXT:    [[TMP43:%.*]] = load i32, i32* [[COMP_CASTED8]], align 4
1035 // CHECK3-NEXT:    [[TMP44:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS9]], i32 0, i32 0
1036 // CHECK3-NEXT:    [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i32*
1037 // CHECK3-NEXT:    store i32 [[TMP41]], i32* [[TMP45]], align 4
1038 // CHECK3-NEXT:    [[TMP46:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS10]], i32 0, i32 0
1039 // CHECK3-NEXT:    [[TMP47:%.*]] = bitcast i8** [[TMP46]] to i32*
1040 // CHECK3-NEXT:    store i32 [[TMP41]], i32* [[TMP47]], align 4
1041 // CHECK3-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS11]], i32 0, i32 0
1042 // CHECK3-NEXT:    store i8* null, i8** [[TMP48]], align 4
1043 // CHECK3-NEXT:    [[TMP49:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS9]], i32 0, i32 1
1044 // CHECK3-NEXT:    [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i32*
1045 // CHECK3-NEXT:    store i32 [[TMP43]], i32* [[TMP50]], align 4
1046 // CHECK3-NEXT:    [[TMP51:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS10]], i32 0, i32 1
1047 // CHECK3-NEXT:    [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32*
1048 // CHECK3-NEXT:    store i32 [[TMP43]], i32* [[TMP52]], align 4
1049 // CHECK3-NEXT:    [[TMP53:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS11]], i32 0, i32 1
1050 // CHECK3-NEXT:    store i8* null, i8** [[TMP53]], align 4
1051 // CHECK3-NEXT:    [[TMP54:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS9]], i32 0, i32 0
1052 // CHECK3-NEXT:    [[TMP55:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS10]], i32 0, i32 0
1053 // CHECK3-NEXT:    [[TMP56:%.*]] = load i32, i32* [[LA]], align 4
1054 // CHECK3-NEXT:    [[KERNEL_ARGS12:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
1055 // CHECK3-NEXT:    [[TMP57:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS12]], i32 0, i32 0
1056 // CHECK3-NEXT:    store i32 1, i32* [[TMP57]], align 4
1057 // CHECK3-NEXT:    [[TMP58:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS12]], i32 0, i32 1
1058 // CHECK3-NEXT:    store i32 2, i32* [[TMP58]], align 4
1059 // CHECK3-NEXT:    [[TMP59:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS12]], i32 0, i32 2
1060 // CHECK3-NEXT:    store i8** [[TMP54]], i8*** [[TMP59]], align 4
1061 // CHECK3-NEXT:    [[TMP60:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS12]], i32 0, i32 3
1062 // CHECK3-NEXT:    store i8** [[TMP55]], i8*** [[TMP60]], align 4
1063 // CHECK3-NEXT:    [[TMP61:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS12]], i32 0, i32 4
1064 // CHECK3-NEXT:    store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64** [[TMP61]], align 4
1065 // CHECK3-NEXT:    [[TMP62:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS12]], i32 0, i32 5
1066 // CHECK3-NEXT:    store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i64** [[TMP62]], align 4
1067 // CHECK3-NEXT:    [[TMP63:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS12]], i32 0, i32 6
1068 // CHECK3-NEXT:    store i8** null, i8*** [[TMP63]], align 4
1069 // CHECK3-NEXT:    [[TMP64:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS12]], i32 0, i32 7
1070 // CHECK3-NEXT:    store i8** null, i8*** [[TMP64]], align 4
1071 // CHECK3-NEXT:    [[TMP65:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS12]], i32 0, i32 8
1072 // CHECK3-NEXT:    store i64 0, i64* [[TMP65]], align 8
1073 // CHECK3-NEXT:    [[TMP66:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 [[TMP56]], i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z27teams_argument_global_locali_l46.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS12]])
1074 // CHECK3-NEXT:    [[TMP67:%.*]] = icmp ne i32 [[TMP66]], 0
1075 // CHECK3-NEXT:    br i1 [[TMP67]], label [[OMP_OFFLOAD_FAILED13:%.*]], label [[OMP_OFFLOAD_CONT14:%.*]]
1076 // CHECK3:       omp_offload.failed13:
1077 // CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z27teams_argument_global_locali_l46(i32 [[TMP41]], i32 [[TMP43]]) #[[ATTR2]]
1078 // CHECK3-NEXT:    br label [[OMP_OFFLOAD_CONT14]]
1079 // CHECK3:       omp_offload.cont14:
1080 // CHECK3-NEXT:    [[TMP68:%.*]] = load i32, i32* [[LA]], align 4
1081 // CHECK3-NEXT:    store i32 [[TMP68]], i32* [[LA_CASTED15]], align 4
1082 // CHECK3-NEXT:    [[TMP69:%.*]] = load i32, i32* [[LA_CASTED15]], align 4
1083 // CHECK3-NEXT:    [[TMP70:%.*]] = load i32, i32* [[COMP]], align 4
1084 // CHECK3-NEXT:    store i32 [[TMP70]], i32* [[COMP_CASTED16]], align 4
1085 // CHECK3-NEXT:    [[TMP71:%.*]] = load i32, i32* [[COMP_CASTED16]], align 4
1086 // CHECK3-NEXT:    [[TMP72:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
1087 // CHECK3-NEXT:    [[TMP73:%.*]] = bitcast i8** [[TMP72]] to i32*
1088 // CHECK3-NEXT:    store i32 [[TMP69]], i32* [[TMP73]], align 4
1089 // CHECK3-NEXT:    [[TMP74:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
1090 // CHECK3-NEXT:    [[TMP75:%.*]] = bitcast i8** [[TMP74]] to i32*
1091 // CHECK3-NEXT:    store i32 [[TMP69]], i32* [[TMP75]], align 4
1092 // CHECK3-NEXT:    [[TMP76:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 0
1093 // CHECK3-NEXT:    store i8* null, i8** [[TMP76]], align 4
1094 // CHECK3-NEXT:    [[TMP77:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 1
1095 // CHECK3-NEXT:    [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32*
1096 // CHECK3-NEXT:    store i32 [[TMP71]], i32* [[TMP78]], align 4
1097 // CHECK3-NEXT:    [[TMP79:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 1
1098 // CHECK3-NEXT:    [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i32*
1099 // CHECK3-NEXT:    store i32 [[TMP71]], i32* [[TMP80]], align 4
1100 // CHECK3-NEXT:    [[TMP81:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 1
1101 // CHECK3-NEXT:    store i8* null, i8** [[TMP81]], align 4
1102 // CHECK3-NEXT:    [[TMP82:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
1103 // CHECK3-NEXT:    [[TMP83:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
1104 // CHECK3-NEXT:    [[TMP84:%.*]] = load i32, i32* [[LA]], align 4
1105 // CHECK3-NEXT:    [[KERNEL_ARGS20:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
1106 // CHECK3-NEXT:    [[TMP85:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS20]], i32 0, i32 0
1107 // CHECK3-NEXT:    store i32 1, i32* [[TMP85]], align 4
1108 // CHECK3-NEXT:    [[TMP86:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS20]], i32 0, i32 1
1109 // CHECK3-NEXT:    store i32 2, i32* [[TMP86]], align 4
1110 // CHECK3-NEXT:    [[TMP87:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS20]], i32 0, i32 2
1111 // CHECK3-NEXT:    store i8** [[TMP82]], i8*** [[TMP87]], align 4
1112 // CHECK3-NEXT:    [[TMP88:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS20]], i32 0, i32 3
1113 // CHECK3-NEXT:    store i8** [[TMP83]], i8*** [[TMP88]], align 4
1114 // CHECK3-NEXT:    [[TMP89:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS20]], i32 0, i32 4
1115 // CHECK3-NEXT:    store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.8, i32 0, i32 0), i64** [[TMP89]], align 4
1116 // CHECK3-NEXT:    [[TMP90:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS20]], i32 0, i32 5
1117 // CHECK3-NEXT:    store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.9, i32 0, i32 0), i64** [[TMP90]], align 4
1118 // CHECK3-NEXT:    [[TMP91:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS20]], i32 0, i32 6
1119 // CHECK3-NEXT:    store i8** null, i8*** [[TMP91]], align 4
1120 // CHECK3-NEXT:    [[TMP92:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS20]], i32 0, i32 7
1121 // CHECK3-NEXT:    store i8** null, i8*** [[TMP92]], align 4
1122 // CHECK3-NEXT:    [[TMP93:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS20]], i32 0, i32 8
1123 // CHECK3-NEXT:    store i64 0, i64* [[TMP93]], align 8
1124 // CHECK3-NEXT:    [[TMP94:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 0, i32 [[TMP84]], i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z27teams_argument_global_locali_l53.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS20]])
1125 // CHECK3-NEXT:    [[TMP95:%.*]] = icmp ne i32 [[TMP94]], 0
1126 // CHECK3-NEXT:    br i1 [[TMP95]], label [[OMP_OFFLOAD_FAILED21:%.*]], label [[OMP_OFFLOAD_CONT22:%.*]]
1127 // CHECK3:       omp_offload.failed21:
1128 // CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z27teams_argument_global_locali_l53(i32 [[TMP69]], i32 [[TMP71]]) #[[ATTR2]]
1129 // CHECK3-NEXT:    br label [[OMP_OFFLOAD_CONT22]]
1130 // CHECK3:       omp_offload.cont22:
1131 // CHECK3-NEXT:    [[TMP96:%.*]] = load i32, i32* @Gbla, align 4
1132 // CHECK3-NEXT:    store i32 [[TMP96]], i32* [[GBLA_CASTED]], align 4
1133 // CHECK3-NEXT:    [[TMP97:%.*]] = load i32, i32* [[GBLA_CASTED]], align 4
1134 // CHECK3-NEXT:    [[TMP98:%.*]] = load i32, i32* [[A_ADDR]], align 4
1135 // CHECK3-NEXT:    store i32 [[TMP98]], i32* [[A_CASTED]], align 4
1136 // CHECK3-NEXT:    [[TMP99:%.*]] = load i32, i32* [[A_CASTED]], align 4
1137 // CHECK3-NEXT:    [[TMP100:%.*]] = load float, float* [[LC]], align 4
1138 // CHECK3-NEXT:    [[CONV:%.*]] = bitcast i32* [[LC_CASTED]] to float*
1139 // CHECK3-NEXT:    store float [[TMP100]], float* [[CONV]], align 4
1140 // CHECK3-NEXT:    [[TMP101:%.*]] = load i32, i32* [[LC_CASTED]], align 4
1141 // CHECK3-NEXT:    [[TMP102:%.*]] = load i32, i32* [[COMP]], align 4
1142 // CHECK3-NEXT:    store i32 [[TMP102]], i32* [[COMP_CASTED23]], align 4
1143 // CHECK3-NEXT:    [[TMP103:%.*]] = load i32, i32* [[COMP_CASTED23]], align 4
1144 // CHECK3-NEXT:    [[TMP104:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS24]], i32 0, i32 0
1145 // CHECK3-NEXT:    [[TMP105:%.*]] = bitcast i8** [[TMP104]] to i32*
1146 // CHECK3-NEXT:    store i32 [[TMP97]], i32* [[TMP105]], align 4
1147 // CHECK3-NEXT:    [[TMP106:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS25]], i32 0, i32 0
1148 // CHECK3-NEXT:    [[TMP107:%.*]] = bitcast i8** [[TMP106]] to i32*
1149 // CHECK3-NEXT:    store i32 [[TMP97]], i32* [[TMP107]], align 4
1150 // CHECK3-NEXT:    [[TMP108:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS26]], i32 0, i32 0
1151 // CHECK3-NEXT:    store i8* null, i8** [[TMP108]], align 4
1152 // CHECK3-NEXT:    [[TMP109:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS24]], i32 0, i32 1
1153 // CHECK3-NEXT:    [[TMP110:%.*]] = bitcast i8** [[TMP109]] to i32*
1154 // CHECK3-NEXT:    store i32 [[TMP99]], i32* [[TMP110]], align 4
1155 // CHECK3-NEXT:    [[TMP111:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS25]], i32 0, i32 1
1156 // CHECK3-NEXT:    [[TMP112:%.*]] = bitcast i8** [[TMP111]] to i32*
1157 // CHECK3-NEXT:    store i32 [[TMP99]], i32* [[TMP112]], align 4
1158 // CHECK3-NEXT:    [[TMP113:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS26]], i32 0, i32 1
1159 // CHECK3-NEXT:    store i8* null, i8** [[TMP113]], align 4
1160 // CHECK3-NEXT:    [[TMP114:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS24]], i32 0, i32 2
1161 // CHECK3-NEXT:    [[TMP115:%.*]] = bitcast i8** [[TMP114]] to i64**
1162 // CHECK3-NEXT:    store i64* @Gblb, i64** [[TMP115]], align 4
1163 // CHECK3-NEXT:    [[TMP116:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS25]], i32 0, i32 2
1164 // CHECK3-NEXT:    [[TMP117:%.*]] = bitcast i8** [[TMP116]] to i64**
1165 // CHECK3-NEXT:    store i64* @Gblb, i64** [[TMP117]], align 4
1166 // CHECK3-NEXT:    [[TMP118:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS26]], i32 0, i32 2
1167 // CHECK3-NEXT:    store i8* null, i8** [[TMP118]], align 4
1168 // CHECK3-NEXT:    [[TMP119:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS24]], i32 0, i32 3
1169 // CHECK3-NEXT:    [[TMP120:%.*]] = bitcast i8** [[TMP119]] to i32*
1170 // CHECK3-NEXT:    store i32 [[TMP101]], i32* [[TMP120]], align 4
1171 // CHECK3-NEXT:    [[TMP121:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS25]], i32 0, i32 3
1172 // CHECK3-NEXT:    [[TMP122:%.*]] = bitcast i8** [[TMP121]] to i32*
1173 // CHECK3-NEXT:    store i32 [[TMP101]], i32* [[TMP122]], align 4
1174 // CHECK3-NEXT:    [[TMP123:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS26]], i32 0, i32 3
1175 // CHECK3-NEXT:    store i8* null, i8** [[TMP123]], align 4
1176 // CHECK3-NEXT:    [[TMP124:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS24]], i32 0, i32 4
1177 // CHECK3-NEXT:    [[TMP125:%.*]] = bitcast i8** [[TMP124]] to i32*
1178 // CHECK3-NEXT:    store i32 [[TMP103]], i32* [[TMP125]], align 4
1179 // CHECK3-NEXT:    [[TMP126:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS25]], i32 0, i32 4
1180 // CHECK3-NEXT:    [[TMP127:%.*]] = bitcast i8** [[TMP126]] to i32*
1181 // CHECK3-NEXT:    store i32 [[TMP103]], i32* [[TMP127]], align 4
1182 // CHECK3-NEXT:    [[TMP128:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS26]], i32 0, i32 4
1183 // CHECK3-NEXT:    store i8* null, i8** [[TMP128]], align 4
1184 // CHECK3-NEXT:    [[TMP129:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS24]], i32 0, i32 0
1185 // CHECK3-NEXT:    [[TMP130:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS25]], i32 0, i32 0
1186 // CHECK3-NEXT:    [[TMP131:%.*]] = load i32, i32* @Gbla, align 4
1187 // CHECK3-NEXT:    [[TMP132:%.*]] = load i32, i32* [[A_ADDR]], align 4
1188 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP131]], [[TMP132]]
1189 // CHECK3-NEXT:    [[TMP133:%.*]] = load i64, i64* @Gblb, align 8
1190 // CHECK3-NEXT:    [[TMP134:%.*]] = load float, float* [[LC]], align 4
1191 // CHECK3-NEXT:    [[CONV27:%.*]] = fptosi float [[TMP134]] to i64
1192 // CHECK3-NEXT:    [[ADD28:%.*]] = add nsw i64 [[TMP133]], [[CONV27]]
1193 // CHECK3-NEXT:    [[TMP135:%.*]] = trunc i64 [[ADD28]] to i32
1194 // CHECK3-NEXT:    [[KERNEL_ARGS29:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
1195 // CHECK3-NEXT:    [[TMP136:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS29]], i32 0, i32 0
1196 // CHECK3-NEXT:    store i32 1, i32* [[TMP136]], align 4
1197 // CHECK3-NEXT:    [[TMP137:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS29]], i32 0, i32 1
1198 // CHECK3-NEXT:    store i32 5, i32* [[TMP137]], align 4
1199 // CHECK3-NEXT:    [[TMP138:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS29]], i32 0, i32 2
1200 // CHECK3-NEXT:    store i8** [[TMP129]], i8*** [[TMP138]], align 4
1201 // CHECK3-NEXT:    [[TMP139:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS29]], i32 0, i32 3
1202 // CHECK3-NEXT:    store i8** [[TMP130]], i8*** [[TMP139]], align 4
1203 // CHECK3-NEXT:    [[TMP140:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS29]], i32 0, i32 4
1204 // CHECK3-NEXT:    store i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.11, i32 0, i32 0), i64** [[TMP140]], align 4
1205 // CHECK3-NEXT:    [[TMP141:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS29]], i32 0, i32 5
1206 // CHECK3-NEXT:    store i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.12, i32 0, i32 0), i64** [[TMP141]], align 4
1207 // CHECK3-NEXT:    [[TMP142:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS29]], i32 0, i32 6
1208 // CHECK3-NEXT:    store i8** null, i8*** [[TMP142]], align 4
1209 // CHECK3-NEXT:    [[TMP143:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS29]], i32 0, i32 7
1210 // CHECK3-NEXT:    store i8** null, i8*** [[TMP143]], align 4
1211 // CHECK3-NEXT:    [[TMP144:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS29]], i32 0, i32 8
1212 // CHECK3-NEXT:    store i64 0, i64* [[TMP144]], align 8
1213 // CHECK3-NEXT:    [[TMP145:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 [[ADD]], i32 [[TMP135]], i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z27teams_argument_global_locali_l62.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS29]])
1214 // CHECK3-NEXT:    [[TMP146:%.*]] = icmp ne i32 [[TMP145]], 0
1215 // CHECK3-NEXT:    br i1 [[TMP146]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]]
1216 // CHECK3:       omp_offload.failed30:
1217 // CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z27teams_argument_global_locali_l62(i32 [[TMP97]], i32 [[TMP99]], i64* @Gblb, i32 [[TMP101]], i32 [[TMP103]]) #[[ATTR2]]
1218 // CHECK3-NEXT:    br label [[OMP_OFFLOAD_CONT31]]
1219 // CHECK3:       omp_offload.cont31:
1220 // CHECK3-NEXT:    [[TMP147:%.*]] = load i32*, i32** @Gblc, align 4
1221 // CHECK3-NEXT:    store i32* [[TMP147]], i32** [[TMP]], align 4
1222 // CHECK3-NEXT:    [[TMP148:%.*]] = load i32, i32* @Gbla, align 4
1223 // CHECK3-NEXT:    store i32 [[TMP148]], i32* [[GBLC_CASTED]], align 4
1224 // CHECK3-NEXT:    [[TMP149:%.*]] = load i32, i32* [[GBLC_CASTED]], align 4
1225 // CHECK3-NEXT:    [[TMP150:%.*]] = load i32, i32* [[COMP]], align 4
1226 // CHECK3-NEXT:    store i32 [[TMP150]], i32* [[COMP_CASTED32]], align 4
1227 // CHECK3-NEXT:    [[TMP151:%.*]] = load i32, i32* [[COMP_CASTED32]], align 4
1228 // CHECK3-NEXT:    [[TMP152:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0
1229 // CHECK3-NEXT:    [[TMP153:%.*]] = bitcast i8** [[TMP152]] to i32*
1230 // CHECK3-NEXT:    store i32 [[TMP149]], i32* [[TMP153]], align 4
1231 // CHECK3-NEXT:    [[TMP154:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0
1232 // CHECK3-NEXT:    [[TMP155:%.*]] = bitcast i8** [[TMP154]] to i32*
1233 // CHECK3-NEXT:    store i32 [[TMP149]], i32* [[TMP155]], align 4
1234 // CHECK3-NEXT:    [[TMP156:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 0
1235 // CHECK3-NEXT:    store i8* null, i8** [[TMP156]], align 4
1236 // CHECK3-NEXT:    [[TMP157:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 1
1237 // CHECK3-NEXT:    [[TMP158:%.*]] = bitcast i8** [[TMP157]] to i32*
1238 // CHECK3-NEXT:    store i32 [[TMP151]], i32* [[TMP158]], align 4
1239 // CHECK3-NEXT:    [[TMP159:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 1
1240 // CHECK3-NEXT:    [[TMP160:%.*]] = bitcast i8** [[TMP159]] to i32*
1241 // CHECK3-NEXT:    store i32 [[TMP151]], i32* [[TMP160]], align 4
1242 // CHECK3-NEXT:    [[TMP161:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 1
1243 // CHECK3-NEXT:    store i8* null, i8** [[TMP161]], align 4
1244 // CHECK3-NEXT:    [[TMP162:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0
1245 // CHECK3-NEXT:    [[TMP163:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0
1246 // CHECK3-NEXT:    [[TMP164:%.*]] = load i32*, i32** @Gblc, align 4
1247 // CHECK3-NEXT:    store i32* [[TMP164]], i32** [[_TMP36]], align 4
1248 // CHECK3-NEXT:    [[TMP165:%.*]] = load i32, i32* @Gbla, align 4
1249 // CHECK3-NEXT:    [[ADD37:%.*]] = add nsw i32 [[TMP165]], 1
1250 // CHECK3-NEXT:    [[TMP166:%.*]] = load i32*, i32** @Gblc, align 4
1251 // CHECK3-NEXT:    store i32* [[TMP166]], i32** [[_TMP38]], align 4
1252 // CHECK3-NEXT:    [[TMP167:%.*]] = load i32, i32* @Gbla, align 4
1253 // CHECK3-NEXT:    [[ADD39:%.*]] = add nsw i32 [[TMP167]], 2
1254 // CHECK3-NEXT:    [[KERNEL_ARGS40:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
1255 // CHECK3-NEXT:    [[TMP168:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS40]], i32 0, i32 0
1256 // CHECK3-NEXT:    store i32 1, i32* [[TMP168]], align 4
1257 // CHECK3-NEXT:    [[TMP169:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS40]], i32 0, i32 1
1258 // CHECK3-NEXT:    store i32 2, i32* [[TMP169]], align 4
1259 // CHECK3-NEXT:    [[TMP170:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS40]], i32 0, i32 2
1260 // CHECK3-NEXT:    store i8** [[TMP162]], i8*** [[TMP170]], align 4
1261 // CHECK3-NEXT:    [[TMP171:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS40]], i32 0, i32 3
1262 // CHECK3-NEXT:    store i8** [[TMP163]], i8*** [[TMP171]], align 4
1263 // CHECK3-NEXT:    [[TMP172:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS40]], i32 0, i32 4
1264 // CHECK3-NEXT:    store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.14, i32 0, i32 0), i64** [[TMP172]], align 4
1265 // CHECK3-NEXT:    [[TMP173:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS40]], i32 0, i32 5
1266 // CHECK3-NEXT:    store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.15, i32 0, i32 0), i64** [[TMP173]], align 4
1267 // CHECK3-NEXT:    [[TMP174:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS40]], i32 0, i32 6
1268 // CHECK3-NEXT:    store i8** null, i8*** [[TMP174]], align 4
1269 // CHECK3-NEXT:    [[TMP175:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS40]], i32 0, i32 7
1270 // CHECK3-NEXT:    store i8** null, i8*** [[TMP175]], align 4
1271 // CHECK3-NEXT:    [[TMP176:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS40]], i32 0, i32 8
1272 // CHECK3-NEXT:    store i64 0, i64* [[TMP176]], align 8
1273 // CHECK3-NEXT:    [[TMP177:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 [[ADD37]], i32 [[ADD39]], i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z27teams_argument_global_locali_l71.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS40]])
1274 // CHECK3-NEXT:    [[TMP178:%.*]] = icmp ne i32 [[TMP177]], 0
1275 // CHECK3-NEXT:    br i1 [[TMP178]], label [[OMP_OFFLOAD_FAILED41:%.*]], label [[OMP_OFFLOAD_CONT42:%.*]]
1276 // CHECK3:       omp_offload.failed41:
1277 // CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z27teams_argument_global_locali_l71(i32 [[TMP149]], i32 [[TMP151]]) #[[ATTR2]]
1278 // CHECK3-NEXT:    br label [[OMP_OFFLOAD_CONT42]]
1279 // CHECK3:       omp_offload.cont42:
1280 // CHECK3-NEXT:    [[TMP179:%.*]] = load i32, i32* [[COMP]], align 4
1281 // CHECK3-NEXT:    ret i32 [[TMP179]]
1282 //
1283 //
1284 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z27teams_argument_global_locali_l31
1285 // CHECK3-SAME: (i32 noundef [[COMP:%.*]]) #[[ATTR1:[0-9]+]] {
1286 // CHECK3-NEXT:  entry:
1287 // CHECK3-NEXT:    [[COMP_ADDR:%.*]] = alloca i32, align 4
1288 // CHECK3-NEXT:    store i32 [[COMP]], i32* [[COMP_ADDR]], align 4
1289 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[COMP_ADDR]])
1290 // CHECK3-NEXT:    ret void
1291 //
1292 //
1293 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined.
1294 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[COMP:%.*]]) #[[ATTR1]] {
1295 // CHECK3-NEXT:  entry:
1296 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
1297 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
1298 // CHECK3-NEXT:    [[COMP_ADDR:%.*]] = alloca i32*, align 4
1299 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
1300 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
1301 // CHECK3-NEXT:    store i32* [[COMP]], i32** [[COMP_ADDR]], align 4
1302 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[COMP_ADDR]], align 4
1303 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
1304 // CHECK3-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP1]], 1
1305 // CHECK3-NEXT:    store i32 [[INC]], i32* [[TMP0]], align 4
1306 // CHECK3-NEXT:    ret void
1307 //
1308 //
1309 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z27teams_argument_global_locali_l37
1310 // CHECK3-SAME: (i32 noundef [[COMP:%.*]]) #[[ATTR1]] {
1311 // CHECK3-NEXT:  entry:
1312 // CHECK3-NEXT:    [[COMP_ADDR:%.*]] = alloca i32, align 4
1313 // CHECK3-NEXT:    store i32 [[COMP]], i32* [[COMP_ADDR]], align 4
1314 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32* [[COMP_ADDR]])
1315 // CHECK3-NEXT:    ret void
1316 //
1317 //
1318 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..1
1319 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[COMP:%.*]]) #[[ATTR1]] {
1320 // CHECK3-NEXT:  entry:
1321 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
1322 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
1323 // CHECK3-NEXT:    [[COMP_ADDR:%.*]] = alloca i32*, align 4
1324 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
1325 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
1326 // CHECK3-NEXT:    store i32* [[COMP]], i32** [[COMP_ADDR]], align 4
1327 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[COMP_ADDR]], align 4
1328 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
1329 // CHECK3-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP1]], 1
1330 // CHECK3-NEXT:    store i32 [[INC]], i32* [[TMP0]], align 4
1331 // CHECK3-NEXT:    ret void
1332 //
1333 //
1334 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z27teams_argument_global_locali_l46
1335 // CHECK3-SAME: (i32 noundef [[LA:%.*]], i32 noundef [[COMP:%.*]]) #[[ATTR1]] {
1336 // CHECK3-NEXT:  entry:
1337 // CHECK3-NEXT:    [[LA_ADDR:%.*]] = alloca i32, align 4
1338 // CHECK3-NEXT:    [[COMP_ADDR:%.*]] = alloca i32, align 4
1339 // CHECK3-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
1340 // CHECK3-NEXT:    store i32 [[LA]], i32* [[LA_ADDR]], align 4
1341 // CHECK3-NEXT:    store i32 [[COMP]], i32* [[COMP_ADDR]], align 4
1342 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[LA_ADDR]], align 4
1343 // CHECK3-NEXT:    call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]], i32 0)
1344 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32* [[COMP_ADDR]])
1345 // CHECK3-NEXT:    ret void
1346 //
1347 //
1348 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..4
1349 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[COMP:%.*]]) #[[ATTR1]] {
1350 // CHECK3-NEXT:  entry:
1351 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
1352 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
1353 // CHECK3-NEXT:    [[COMP_ADDR:%.*]] = alloca i32*, align 4
1354 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
1355 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
1356 // CHECK3-NEXT:    store i32* [[COMP]], i32** [[COMP_ADDR]], align 4
1357 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[COMP_ADDR]], align 4
1358 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
1359 // CHECK3-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP1]], 1
1360 // CHECK3-NEXT:    store i32 [[INC]], i32* [[TMP0]], align 4
1361 // CHECK3-NEXT:    ret void
1362 //
1363 //
1364 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z27teams_argument_global_locali_l53
1365 // CHECK3-SAME: (i32 noundef [[LA:%.*]], i32 noundef [[COMP:%.*]]) #[[ATTR1]] {
1366 // CHECK3-NEXT:  entry:
1367 // CHECK3-NEXT:    [[LA_ADDR:%.*]] = alloca i32, align 4
1368 // CHECK3-NEXT:    [[COMP_ADDR:%.*]] = alloca i32, align 4
1369 // CHECK3-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
1370 // CHECK3-NEXT:    store i32 [[LA]], i32* [[LA_ADDR]], align 4
1371 // CHECK3-NEXT:    store i32 [[COMP]], i32* [[COMP_ADDR]], align 4
1372 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[LA_ADDR]], align 4
1373 // CHECK3-NEXT:    call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 [[TMP1]])
1374 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32* [[COMP_ADDR]])
1375 // CHECK3-NEXT:    ret void
1376 //
1377 //
1378 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..7
1379 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[COMP:%.*]]) #[[ATTR1]] {
1380 // CHECK3-NEXT:  entry:
1381 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
1382 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
1383 // CHECK3-NEXT:    [[COMP_ADDR:%.*]] = alloca i32*, align 4
1384 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
1385 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
1386 // CHECK3-NEXT:    store i32* [[COMP]], i32** [[COMP_ADDR]], align 4
1387 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[COMP_ADDR]], align 4
1388 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
1389 // CHECK3-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP1]], 1
1390 // CHECK3-NEXT:    store i32 [[INC]], i32* [[TMP0]], align 4
1391 // CHECK3-NEXT:    ret void
1392 //
1393 //
1394 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z27teams_argument_global_locali_l62
1395 // CHECK3-SAME: (i32 noundef [[GBLA:%.*]], i32 noundef [[A:%.*]], i64* noundef nonnull align 4 dereferenceable(8) [[GBLB:%.*]], i32 noundef [[LC:%.*]], i32 noundef [[COMP:%.*]]) #[[ATTR1]] {
1396 // CHECK3-NEXT:  entry:
1397 // CHECK3-NEXT:    [[GBLA_ADDR:%.*]] = alloca i32, align 4
1398 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
1399 // CHECK3-NEXT:    [[GBLB_ADDR:%.*]] = alloca i64*, align 4
1400 // CHECK3-NEXT:    [[LC_ADDR:%.*]] = alloca i32, align 4
1401 // CHECK3-NEXT:    [[COMP_ADDR:%.*]] = alloca i32, align 4
1402 // CHECK3-NEXT:    [[GBLB1:%.*]] = alloca i64, align 8
1403 // CHECK3-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
1404 // CHECK3-NEXT:    store i32 [[GBLA]], i32* [[GBLA_ADDR]], align 4
1405 // CHECK3-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
1406 // CHECK3-NEXT:    store i64* [[GBLB]], i64** [[GBLB_ADDR]], align 4
1407 // CHECK3-NEXT:    store i32 [[LC]], i32* [[LC_ADDR]], align 4
1408 // CHECK3-NEXT:    store i32 [[COMP]], i32* [[COMP_ADDR]], align 4
1409 // CHECK3-NEXT:    [[TMP1:%.*]] = load i64*, i64** [[GBLB_ADDR]], align 4
1410 // CHECK3-NEXT:    [[CONV:%.*]] = bitcast i32* [[LC_ADDR]] to float*
1411 // CHECK3-NEXT:    [[TMP2:%.*]] = load i64, i64* [[TMP1]], align 8
1412 // CHECK3-NEXT:    store i64 [[TMP2]], i64* [[GBLB1]], align 8
1413 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[GBLA_ADDR]], align 4
1414 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A_ADDR]], align 4
1415 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP3]], [[TMP4]]
1416 // CHECK3-NEXT:    [[TMP5:%.*]] = load i64, i64* [[GBLB1]], align 8
1417 // CHECK3-NEXT:    [[TMP6:%.*]] = load float, float* [[CONV]], align 4
1418 // CHECK3-NEXT:    [[CONV2:%.*]] = fptosi float [[TMP6]] to i64
1419 // CHECK3-NEXT:    [[ADD3:%.*]] = add nsw i64 [[TMP5]], [[CONV2]]
1420 // CHECK3-NEXT:    [[TMP7:%.*]] = trunc i64 [[ADD3]] to i32
1421 // CHECK3-NEXT:    call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[ADD]], i32 [[TMP7]])
1422 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32* [[COMP_ADDR]])
1423 // CHECK3-NEXT:    ret void
1424 //
1425 //
1426 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..10
1427 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[COMP:%.*]]) #[[ATTR1]] {
1428 // CHECK3-NEXT:  entry:
1429 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
1430 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
1431 // CHECK3-NEXT:    [[COMP_ADDR:%.*]] = alloca i32*, align 4
1432 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
1433 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
1434 // CHECK3-NEXT:    store i32* [[COMP]], i32** [[COMP_ADDR]], align 4
1435 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[COMP_ADDR]], align 4
1436 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
1437 // CHECK3-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP1]], 1
1438 // CHECK3-NEXT:    store i32 [[INC]], i32* [[TMP0]], align 4
1439 // CHECK3-NEXT:    ret void
1440 //
1441 //
1442 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z27teams_argument_global_locali_l71
1443 // CHECK3-SAME: (i32 noundef [[GBLC:%.*]], i32 noundef [[COMP:%.*]]) #[[ATTR1]] {
1444 // CHECK3-NEXT:  entry:
1445 // CHECK3-NEXT:    [[GBLC_ADDR:%.*]] = alloca i32, align 4
1446 // CHECK3-NEXT:    [[COMP_ADDR:%.*]] = alloca i32, align 4
1447 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32*, align 4
1448 // CHECK3-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
1449 // CHECK3-NEXT:    store i32 [[GBLC]], i32* [[GBLC_ADDR]], align 4
1450 // CHECK3-NEXT:    store i32 [[COMP]], i32* [[COMP_ADDR]], align 4
1451 // CHECK3-NEXT:    store i32* [[GBLC_ADDR]], i32** [[TMP]], align 4
1452 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* @Gbla, align 4
1453 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], 1
1454 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* @Gbla, align 4
1455 // CHECK3-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP2]], 2
1456 // CHECK3-NEXT:    call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[ADD]], i32 [[ADD1]])
1457 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[TMP]], align 4
1458 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i32* [[COMP_ADDR]], i32* [[TMP3]])
1459 // CHECK3-NEXT:    ret void
1460 //
1461 //
1462 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..13
1463 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[COMP:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[GBLC:%.*]]) #[[ATTR1]] {
1464 // CHECK3-NEXT:  entry:
1465 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
1466 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
1467 // CHECK3-NEXT:    [[COMP_ADDR:%.*]] = alloca i32*, align 4
1468 // CHECK3-NEXT:    [[GBLC_ADDR:%.*]] = alloca i32*, align 4
1469 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32*, align 4
1470 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
1471 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
1472 // CHECK3-NEXT:    store i32* [[COMP]], i32** [[COMP_ADDR]], align 4
1473 // CHECK3-NEXT:    store i32* [[GBLC]], i32** [[GBLC_ADDR]], align 4
1474 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[COMP_ADDR]], align 4
1475 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[GBLC_ADDR]], align 4
1476 // CHECK3-NEXT:    store i32* [[TMP1]], i32** [[TMP]], align 4
1477 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* @Gbla, align 4
1478 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4
1479 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP3]], [[TMP2]]
1480 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[TMP0]], align 4
1481 // CHECK3-NEXT:    ret void
1482 //
1483 //
1484 // CHECK3-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
1485 // CHECK3-SAME: () #[[ATTR3:[0-9]+]] {
1486 // CHECK3-NEXT:  entry:
1487 // CHECK3-NEXT:    call void @__tgt_register_requires(i64 1)
1488 // CHECK3-NEXT:    ret void
1489 //
1490 //
1491 // CHECK9-LABEL: define {{[^@]+}}@_Z18teams_template_argv
1492 // CHECK9-SAME: () #[[ATTR0:[0-9]+]] {
1493 // CHECK9-NEXT:  entry:
1494 // CHECK9-NEXT:    [[COMP:%.*]] = alloca i32, align 4
1495 // CHECK9-NEXT:    [[LA:%.*]] = alloca [[STRUCT_SS:%.*]], align 4
1496 // CHECK9-NEXT:    [[LB:%.*]] = alloca [[STRUCT_SS_0:%.*]], align 8
1497 // CHECK9-NEXT:    [[COMP_CASTED:%.*]] = alloca i64, align 8
1498 // CHECK9-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
1499 // CHECK9-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
1500 // CHECK9-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
1501 // CHECK9-NEXT:    [[COMP_CASTED2:%.*]] = alloca i64, align 8
1502 // CHECK9-NEXT:    [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [3 x i8*], align 8
1503 // CHECK9-NEXT:    [[DOTOFFLOAD_PTRS5:%.*]] = alloca [3 x i8*], align 8
1504 // CHECK9-NEXT:    [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [3 x i8*], align 8
1505 // CHECK9-NEXT:    store i32 1, i32* [[COMP]], align 4
1506 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32, i32* [[COMP]], align 4
1507 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[COMP_CASTED]] to i32*
1508 // CHECK9-NEXT:    store i32 [[TMP0]], i32* [[CONV]], align 4
1509 // CHECK9-NEXT:    [[TMP1:%.*]] = load i64, i64* [[COMP_CASTED]], align 8
1510 // CHECK9-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1511 // CHECK9-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to %struct.SS**
1512 // CHECK9-NEXT:    store %struct.SS* @Gbla, %struct.SS** [[TMP3]], align 8
1513 // CHECK9-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1514 // CHECK9-NEXT:    [[TMP5:%.*]] = bitcast i8** [[TMP4]] to %struct.SS**
1515 // CHECK9-NEXT:    store %struct.SS* @Gbla, %struct.SS** [[TMP5]], align 8
1516 // CHECK9-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
1517 // CHECK9-NEXT:    store i8* null, i8** [[TMP6]], align 8
1518 // CHECK9-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
1519 // CHECK9-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to %struct.SS**
1520 // CHECK9-NEXT:    store %struct.SS* [[LA]], %struct.SS** [[TMP8]], align 8
1521 // CHECK9-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
1522 // CHECK9-NEXT:    [[TMP10:%.*]] = bitcast i8** [[TMP9]] to %struct.SS**
1523 // CHECK9-NEXT:    store %struct.SS* [[LA]], %struct.SS** [[TMP10]], align 8
1524 // CHECK9-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
1525 // CHECK9-NEXT:    store i8* null, i8** [[TMP11]], align 8
1526 // CHECK9-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
1527 // CHECK9-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64*
1528 // CHECK9-NEXT:    store i64 [[TMP1]], i64* [[TMP13]], align 8
1529 // CHECK9-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
1530 // CHECK9-NEXT:    [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64*
1531 // CHECK9-NEXT:    store i64 [[TMP1]], i64* [[TMP15]], align 8
1532 // CHECK9-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
1533 // CHECK9-NEXT:    store i8* null, i8** [[TMP16]], align 8
1534 // CHECK9-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1535 // CHECK9-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1536 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* getelementptr inbounds ([[STRUCT_SS]], %struct.SS* @Gbla, i32 0, i32 0), align 4
1537 // CHECK9-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[LA]], i32 0, i32 1
1538 // CHECK9-NEXT:    [[TMP20:%.*]] = load float, float* [[B]], align 4
1539 // CHECK9-NEXT:    [[CONV1:%.*]] = fptosi float [[TMP20]] to i64
1540 // CHECK9-NEXT:    [[TMP21:%.*]] = trunc i64 [[CONV1]] to i32
1541 // CHECK9-NEXT:    [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
1542 // CHECK9-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
1543 // CHECK9-NEXT:    store i32 1, i32* [[TMP22]], align 4
1544 // CHECK9-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
1545 // CHECK9-NEXT:    store i32 3, i32* [[TMP23]], align 4
1546 // CHECK9-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
1547 // CHECK9-NEXT:    store i8** [[TMP17]], i8*** [[TMP24]], align 8
1548 // CHECK9-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
1549 // CHECK9-NEXT:    store i8** [[TMP18]], i8*** [[TMP25]], align 8
1550 // CHECK9-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
1551 // CHECK9-NEXT:    store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64** [[TMP26]], align 8
1552 // CHECK9-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
1553 // CHECK9-NEXT:    store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i64** [[TMP27]], align 8
1554 // CHECK9-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
1555 // CHECK9-NEXT:    store i8** null, i8*** [[TMP28]], align 8
1556 // CHECK9-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
1557 // CHECK9-NEXT:    store i8** null, i8*** [[TMP29]], align 8
1558 // CHECK9-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
1559 // CHECK9-NEXT:    store i64 0, i64* [[TMP30]], align 8
1560 // CHECK9-NEXT:    [[TMP31:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i32 [[TMP19]], i32 [[TMP21]], i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18teams_template_argv_l116.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
1561 // CHECK9-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
1562 // CHECK9-NEXT:    br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
1563 // CHECK9:       omp_offload.failed:
1564 // CHECK9-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18teams_template_argv_l116(%struct.SS* @Gbla, %struct.SS* [[LA]], i64 [[TMP1]]) #[[ATTR2:[0-9]+]]
1565 // CHECK9-NEXT:    br label [[OMP_OFFLOAD_CONT]]
1566 // CHECK9:       omp_offload.cont:
1567 // CHECK9-NEXT:    [[TMP33:%.*]] = load i32, i32* [[COMP]], align 4
1568 // CHECK9-NEXT:    [[CONV3:%.*]] = bitcast i64* [[COMP_CASTED2]] to i32*
1569 // CHECK9-NEXT:    store i32 [[TMP33]], i32* [[CONV3]], align 4
1570 // CHECK9-NEXT:    [[TMP34:%.*]] = load i64, i64* [[COMP_CASTED2]], align 8
1571 // CHECK9-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
1572 // CHECK9-NEXT:    [[TMP36:%.*]] = bitcast i8** [[TMP35]] to %struct.SS.0**
1573 // CHECK9-NEXT:    store %struct.SS.0* [[LB]], %struct.SS.0** [[TMP36]], align 8
1574 // CHECK9-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
1575 // CHECK9-NEXT:    [[TMP38:%.*]] = bitcast i8** [[TMP37]] to %struct.SS.0**
1576 // CHECK9-NEXT:    store %struct.SS.0* [[LB]], %struct.SS.0** [[TMP38]], align 8
1577 // CHECK9-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i64 0, i64 0
1578 // CHECK9-NEXT:    store i8* null, i8** [[TMP39]], align 8
1579 // CHECK9-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1
1580 // CHECK9-NEXT:    [[TMP41:%.*]] = bitcast i8** [[TMP40]] to %struct.SS.0**
1581 // CHECK9-NEXT:    store %struct.SS.0* @Gblb, %struct.SS.0** [[TMP41]], align 8
1582 // CHECK9-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1
1583 // CHECK9-NEXT:    [[TMP43:%.*]] = bitcast i8** [[TMP42]] to %struct.SS.0**
1584 // CHECK9-NEXT:    store %struct.SS.0* @Gblb, %struct.SS.0** [[TMP43]], align 8
1585 // CHECK9-NEXT:    [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i64 0, i64 1
1586 // CHECK9-NEXT:    store i8* null, i8** [[TMP44]], align 8
1587 // CHECK9-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2
1588 // CHECK9-NEXT:    [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i64*
1589 // CHECK9-NEXT:    store i64 [[TMP34]], i64* [[TMP46]], align 8
1590 // CHECK9-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2
1591 // CHECK9-NEXT:    [[TMP48:%.*]] = bitcast i8** [[TMP47]] to i64*
1592 // CHECK9-NEXT:    store i64 [[TMP34]], i64* [[TMP48]], align 8
1593 // CHECK9-NEXT:    [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i64 0, i64 2
1594 // CHECK9-NEXT:    store i8* null, i8** [[TMP49]], align 8
1595 // CHECK9-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
1596 // CHECK9-NEXT:    [[TMP51:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
1597 // CHECK9-NEXT:    [[B7:%.*]] = getelementptr inbounds [[STRUCT_SS_0]], %struct.SS.0* [[LB]], i32 0, i32 1
1598 // CHECK9-NEXT:    [[TMP52:%.*]] = load float, float* [[B7]], align 8
1599 // CHECK9-NEXT:    [[CONV8:%.*]] = fptosi float [[TMP52]] to i64
1600 // CHECK9-NEXT:    [[TMP53:%.*]] = trunc i64 [[CONV8]] to i32
1601 // CHECK9-NEXT:    [[TMP54:%.*]] = load i64, i64* getelementptr inbounds ([[STRUCT_SS_0]], %struct.SS.0* @Gblb, i32 0, i32 0), align 8
1602 // CHECK9-NEXT:    [[TMP55:%.*]] = trunc i64 [[TMP54]] to i32
1603 // CHECK9-NEXT:    [[KERNEL_ARGS9:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
1604 // CHECK9-NEXT:    [[TMP56:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS9]], i32 0, i32 0
1605 // CHECK9-NEXT:    store i32 1, i32* [[TMP56]], align 4
1606 // CHECK9-NEXT:    [[TMP57:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS9]], i32 0, i32 1
1607 // CHECK9-NEXT:    store i32 3, i32* [[TMP57]], align 4
1608 // CHECK9-NEXT:    [[TMP58:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS9]], i32 0, i32 2
1609 // CHECK9-NEXT:    store i8** [[TMP50]], i8*** [[TMP58]], align 8
1610 // CHECK9-NEXT:    [[TMP59:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS9]], i32 0, i32 3
1611 // CHECK9-NEXT:    store i8** [[TMP51]], i8*** [[TMP59]], align 8
1612 // CHECK9-NEXT:    [[TMP60:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS9]], i32 0, i32 4
1613 // CHECK9-NEXT:    store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.2, i32 0, i32 0), i64** [[TMP60]], align 8
1614 // CHECK9-NEXT:    [[TMP61:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS9]], i32 0, i32 5
1615 // CHECK9-NEXT:    store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.3, i32 0, i32 0), i64** [[TMP61]], align 8
1616 // CHECK9-NEXT:    [[TMP62:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS9]], i32 0, i32 6
1617 // CHECK9-NEXT:    store i8** null, i8*** [[TMP62]], align 8
1618 // CHECK9-NEXT:    [[TMP63:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS9]], i32 0, i32 7
1619 // CHECK9-NEXT:    store i8** null, i8*** [[TMP63]], align 8
1620 // CHECK9-NEXT:    [[TMP64:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS9]], i32 0, i32 8
1621 // CHECK9-NEXT:    store i64 0, i64* [[TMP64]], align 8
1622 // CHECK9-NEXT:    [[TMP65:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 [[TMP53]], i32 [[TMP55]], i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18teams_template_argv_l125.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS9]])
1623 // CHECK9-NEXT:    [[TMP66:%.*]] = icmp ne i32 [[TMP65]], 0
1624 // CHECK9-NEXT:    br i1 [[TMP66]], label [[OMP_OFFLOAD_FAILED10:%.*]], label [[OMP_OFFLOAD_CONT11:%.*]]
1625 // CHECK9:       omp_offload.failed10:
1626 // CHECK9-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18teams_template_argv_l125(%struct.SS.0* [[LB]], %struct.SS.0* @Gblb, i64 [[TMP34]]) #[[ATTR2]]
1627 // CHECK9-NEXT:    br label [[OMP_OFFLOAD_CONT11]]
1628 // CHECK9:       omp_offload.cont11:
1629 // CHECK9-NEXT:    [[TMP67:%.*]] = load i32, i32* [[COMP]], align 4
1630 // CHECK9-NEXT:    ret i32 [[TMP67]]
1631 //
1632 //
1633 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18teams_template_argv_l116
1634 // CHECK9-SAME: (%struct.SS* noundef nonnull align 4 dereferenceable(8) [[GBLA:%.*]], %struct.SS* noundef nonnull align 4 dereferenceable(8) [[LA:%.*]], i64 noundef [[COMP:%.*]]) #[[ATTR1:[0-9]+]] {
1635 // CHECK9-NEXT:  entry:
1636 // CHECK9-NEXT:    [[GBLA_ADDR:%.*]] = alloca %struct.SS*, align 8
1637 // CHECK9-NEXT:    [[LA_ADDR:%.*]] = alloca %struct.SS*, align 8
1638 // CHECK9-NEXT:    [[COMP_ADDR:%.*]] = alloca i64, align 8
1639 // CHECK9-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
1640 // CHECK9-NEXT:    store %struct.SS* [[GBLA]], %struct.SS** [[GBLA_ADDR]], align 8
1641 // CHECK9-NEXT:    store %struct.SS* [[LA]], %struct.SS** [[LA_ADDR]], align 8
1642 // CHECK9-NEXT:    store i64 [[COMP]], i64* [[COMP_ADDR]], align 8
1643 // CHECK9-NEXT:    [[TMP1:%.*]] = load %struct.SS*, %struct.SS** [[GBLA_ADDR]], align 8
1644 // CHECK9-NEXT:    [[TMP2:%.*]] = load %struct.SS*, %struct.SS** [[LA_ADDR]], align 8
1645 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[COMP_ADDR]] to i32*
1646 // CHECK9-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP1]], i32 0, i32 0
1647 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
1648 // CHECK9-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[TMP2]], i32 0, i32 1
1649 // CHECK9-NEXT:    [[TMP4:%.*]] = load float, float* [[B]], align 4
1650 // CHECK9-NEXT:    [[CONV1:%.*]] = fptosi float [[TMP4]] to i64
1651 // CHECK9-NEXT:    [[TMP5:%.*]] = trunc i64 [[CONV1]] to i32
1652 // CHECK9-NEXT:    call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP3]], i32 [[TMP5]])
1653 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[CONV]])
1654 // CHECK9-NEXT:    ret void
1655 //
1656 //
1657 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined.
1658 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[COMP:%.*]]) #[[ATTR1]] {
1659 // CHECK9-NEXT:  entry:
1660 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1661 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1662 // CHECK9-NEXT:    [[COMP_ADDR:%.*]] = alloca i32*, align 8
1663 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1664 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1665 // CHECK9-NEXT:    store i32* [[COMP]], i32** [[COMP_ADDR]], align 8
1666 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[COMP_ADDR]], align 8
1667 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
1668 // CHECK9-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP1]], 1
1669 // CHECK9-NEXT:    store i32 [[INC]], i32* [[TMP0]], align 4
1670 // CHECK9-NEXT:    ret void
1671 //
1672 //
1673 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18teams_template_argv_l125
1674 // CHECK9-SAME: (%struct.SS.0* noundef nonnull align 8 dereferenceable(16) [[LB:%.*]], %struct.SS.0* noundef nonnull align 8 dereferenceable(16) [[GBLB:%.*]], i64 noundef [[COMP:%.*]]) #[[ATTR1]] {
1675 // CHECK9-NEXT:  entry:
1676 // CHECK9-NEXT:    [[LB_ADDR:%.*]] = alloca %struct.SS.0*, align 8
1677 // CHECK9-NEXT:    [[GBLB_ADDR:%.*]] = alloca %struct.SS.0*, align 8
1678 // CHECK9-NEXT:    [[COMP_ADDR:%.*]] = alloca i64, align 8
1679 // CHECK9-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
1680 // CHECK9-NEXT:    store %struct.SS.0* [[LB]], %struct.SS.0** [[LB_ADDR]], align 8
1681 // CHECK9-NEXT:    store %struct.SS.0* [[GBLB]], %struct.SS.0** [[GBLB_ADDR]], align 8
1682 // CHECK9-NEXT:    store i64 [[COMP]], i64* [[COMP_ADDR]], align 8
1683 // CHECK9-NEXT:    [[TMP1:%.*]] = load %struct.SS.0*, %struct.SS.0** [[LB_ADDR]], align 8
1684 // CHECK9-NEXT:    [[TMP2:%.*]] = load %struct.SS.0*, %struct.SS.0** [[GBLB_ADDR]], align 8
1685 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[COMP_ADDR]] to i32*
1686 // CHECK9-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS_0:%.*]], %struct.SS.0* [[TMP1]], i32 0, i32 1
1687 // CHECK9-NEXT:    [[TMP3:%.*]] = load float, float* [[B]], align 8
1688 // CHECK9-NEXT:    [[CONV1:%.*]] = fptosi float [[TMP3]] to i64
1689 // CHECK9-NEXT:    [[TMP4:%.*]] = trunc i64 [[CONV1]] to i32
1690 // CHECK9-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS_0]], %struct.SS.0* [[TMP2]], i32 0, i32 0
1691 // CHECK9-NEXT:    [[TMP5:%.*]] = load i64, i64* [[A]], align 8
1692 // CHECK9-NEXT:    [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
1693 // CHECK9-NEXT:    call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP4]], i32 [[TMP6]])
1694 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32* [[CONV]])
1695 // CHECK9-NEXT:    ret void
1696 //
1697 //
1698 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..1
1699 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[COMP:%.*]]) #[[ATTR1]] {
1700 // CHECK9-NEXT:  entry:
1701 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1702 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1703 // CHECK9-NEXT:    [[COMP_ADDR:%.*]] = alloca i32*, align 8
1704 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1705 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1706 // CHECK9-NEXT:    store i32* [[COMP]], i32** [[COMP_ADDR]], align 8
1707 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[COMP_ADDR]], align 8
1708 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
1709 // CHECK9-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP1]], 1
1710 // CHECK9-NEXT:    store i32 [[INC]], i32* [[TMP0]], align 4
1711 // CHECK9-NEXT:    ret void
1712 //
1713 //
1714 // CHECK9-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
1715 // CHECK9-SAME: () #[[ATTR3:[0-9]+]] {
1716 // CHECK9-NEXT:  entry:
1717 // CHECK9-NEXT:    call void @__tgt_register_requires(i64 1)
1718 // CHECK9-NEXT:    ret void
1719 //
1720 //
1721 // CHECK11-LABEL: define {{[^@]+}}@_Z18teams_template_argv
1722 // CHECK11-SAME: () #[[ATTR0:[0-9]+]] {
1723 // CHECK11-NEXT:  entry:
1724 // CHECK11-NEXT:    [[COMP:%.*]] = alloca i32, align 4
1725 // CHECK11-NEXT:    [[LA:%.*]] = alloca [[STRUCT_SS:%.*]], align 4
1726 // CHECK11-NEXT:    [[LB:%.*]] = alloca [[STRUCT_SS_0:%.*]], align 4
1727 // CHECK11-NEXT:    [[COMP_CASTED:%.*]] = alloca i32, align 4
1728 // CHECK11-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4
1729 // CHECK11-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4
1730 // CHECK11-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4
1731 // CHECK11-NEXT:    [[COMP_CASTED1:%.*]] = alloca i32, align 4
1732 // CHECK11-NEXT:    [[DOTOFFLOAD_BASEPTRS2:%.*]] = alloca [3 x i8*], align 4
1733 // CHECK11-NEXT:    [[DOTOFFLOAD_PTRS3:%.*]] = alloca [3 x i8*], align 4
1734 // CHECK11-NEXT:    [[DOTOFFLOAD_MAPPERS4:%.*]] = alloca [3 x i8*], align 4
1735 // CHECK11-NEXT:    store i32 1, i32* [[COMP]], align 4
1736 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32, i32* [[COMP]], align 4
1737 // CHECK11-NEXT:    store i32 [[TMP0]], i32* [[COMP_CASTED]], align 4
1738 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32, i32* [[COMP_CASTED]], align 4
1739 // CHECK11-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1740 // CHECK11-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to %struct.SS**
1741 // CHECK11-NEXT:    store %struct.SS* @Gbla, %struct.SS** [[TMP3]], align 4
1742 // CHECK11-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1743 // CHECK11-NEXT:    [[TMP5:%.*]] = bitcast i8** [[TMP4]] to %struct.SS**
1744 // CHECK11-NEXT:    store %struct.SS* @Gbla, %struct.SS** [[TMP5]], align 4
1745 // CHECK11-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
1746 // CHECK11-NEXT:    store i8* null, i8** [[TMP6]], align 4
1747 // CHECK11-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
1748 // CHECK11-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to %struct.SS**
1749 // CHECK11-NEXT:    store %struct.SS* [[LA]], %struct.SS** [[TMP8]], align 4
1750 // CHECK11-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
1751 // CHECK11-NEXT:    [[TMP10:%.*]] = bitcast i8** [[TMP9]] to %struct.SS**
1752 // CHECK11-NEXT:    store %struct.SS* [[LA]], %struct.SS** [[TMP10]], align 4
1753 // CHECK11-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
1754 // CHECK11-NEXT:    store i8* null, i8** [[TMP11]], align 4
1755 // CHECK11-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
1756 // CHECK11-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32*
1757 // CHECK11-NEXT:    store i32 [[TMP1]], i32* [[TMP13]], align 4
1758 // CHECK11-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
1759 // CHECK11-NEXT:    [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32*
1760 // CHECK11-NEXT:    store i32 [[TMP1]], i32* [[TMP15]], align 4
1761 // CHECK11-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
1762 // CHECK11-NEXT:    store i8* null, i8** [[TMP16]], align 4
1763 // CHECK11-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1764 // CHECK11-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1765 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* getelementptr inbounds ([[STRUCT_SS]], %struct.SS* @Gbla, i32 0, i32 0), align 4
1766 // CHECK11-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[LA]], i32 0, i32 1
1767 // CHECK11-NEXT:    [[TMP20:%.*]] = load float, float* [[B]], align 4
1768 // CHECK11-NEXT:    [[CONV:%.*]] = fptosi float [[TMP20]] to i64
1769 // CHECK11-NEXT:    [[TMP21:%.*]] = trunc i64 [[CONV]] to i32
1770 // CHECK11-NEXT:    [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
1771 // CHECK11-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
1772 // CHECK11-NEXT:    store i32 1, i32* [[TMP22]], align 4
1773 // CHECK11-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
1774 // CHECK11-NEXT:    store i32 3, i32* [[TMP23]], align 4
1775 // CHECK11-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
1776 // CHECK11-NEXT:    store i8** [[TMP17]], i8*** [[TMP24]], align 4
1777 // CHECK11-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
1778 // CHECK11-NEXT:    store i8** [[TMP18]], i8*** [[TMP25]], align 4
1779 // CHECK11-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
1780 // CHECK11-NEXT:    store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64** [[TMP26]], align 4
1781 // CHECK11-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
1782 // CHECK11-NEXT:    store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i64** [[TMP27]], align 4
1783 // CHECK11-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
1784 // CHECK11-NEXT:    store i8** null, i8*** [[TMP28]], align 4
1785 // CHECK11-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
1786 // CHECK11-NEXT:    store i8** null, i8*** [[TMP29]], align 4
1787 // CHECK11-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
1788 // CHECK11-NEXT:    store i64 0, i64* [[TMP30]], align 8
1789 // CHECK11-NEXT:    [[TMP31:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i32 [[TMP19]], i32 [[TMP21]], i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18teams_template_argv_l116.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
1790 // CHECK11-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
1791 // CHECK11-NEXT:    br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
1792 // CHECK11:       omp_offload.failed:
1793 // CHECK11-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18teams_template_argv_l116(%struct.SS* @Gbla, %struct.SS* [[LA]], i32 [[TMP1]]) #[[ATTR2:[0-9]+]]
1794 // CHECK11-NEXT:    br label [[OMP_OFFLOAD_CONT]]
1795 // CHECK11:       omp_offload.cont:
1796 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[COMP]], align 4
1797 // CHECK11-NEXT:    store i32 [[TMP33]], i32* [[COMP_CASTED1]], align 4
1798 // CHECK11-NEXT:    [[TMP34:%.*]] = load i32, i32* [[COMP_CASTED1]], align 4
1799 // CHECK11-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS2]], i32 0, i32 0
1800 // CHECK11-NEXT:    [[TMP36:%.*]] = bitcast i8** [[TMP35]] to %struct.SS.0**
1801 // CHECK11-NEXT:    store %struct.SS.0* [[LB]], %struct.SS.0** [[TMP36]], align 4
1802 // CHECK11-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS3]], i32 0, i32 0
1803 // CHECK11-NEXT:    [[TMP38:%.*]] = bitcast i8** [[TMP37]] to %struct.SS.0**
1804 // CHECK11-NEXT:    store %struct.SS.0* [[LB]], %struct.SS.0** [[TMP38]], align 4
1805 // CHECK11-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS4]], i32 0, i32 0
1806 // CHECK11-NEXT:    store i8* null, i8** [[TMP39]], align 4
1807 // CHECK11-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS2]], i32 0, i32 1
1808 // CHECK11-NEXT:    [[TMP41:%.*]] = bitcast i8** [[TMP40]] to %struct.SS.0**
1809 // CHECK11-NEXT:    store %struct.SS.0* @Gblb, %struct.SS.0** [[TMP41]], align 4
1810 // CHECK11-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS3]], i32 0, i32 1
1811 // CHECK11-NEXT:    [[TMP43:%.*]] = bitcast i8** [[TMP42]] to %struct.SS.0**
1812 // CHECK11-NEXT:    store %struct.SS.0* @Gblb, %struct.SS.0** [[TMP43]], align 4
1813 // CHECK11-NEXT:    [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS4]], i32 0, i32 1
1814 // CHECK11-NEXT:    store i8* null, i8** [[TMP44]], align 4
1815 // CHECK11-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS2]], i32 0, i32 2
1816 // CHECK11-NEXT:    [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32*
1817 // CHECK11-NEXT:    store i32 [[TMP34]], i32* [[TMP46]], align 4
1818 // CHECK11-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS3]], i32 0, i32 2
1819 // CHECK11-NEXT:    [[TMP48:%.*]] = bitcast i8** [[TMP47]] to i32*
1820 // CHECK11-NEXT:    store i32 [[TMP34]], i32* [[TMP48]], align 4
1821 // CHECK11-NEXT:    [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS4]], i32 0, i32 2
1822 // CHECK11-NEXT:    store i8* null, i8** [[TMP49]], align 4
1823 // CHECK11-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS2]], i32 0, i32 0
1824 // CHECK11-NEXT:    [[TMP51:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS3]], i32 0, i32 0
1825 // CHECK11-NEXT:    [[B5:%.*]] = getelementptr inbounds [[STRUCT_SS_0]], %struct.SS.0* [[LB]], i32 0, i32 1
1826 // CHECK11-NEXT:    [[TMP52:%.*]] = load float, float* [[B5]], align 4
1827 // CHECK11-NEXT:    [[CONV6:%.*]] = fptosi float [[TMP52]] to i64
1828 // CHECK11-NEXT:    [[TMP53:%.*]] = trunc i64 [[CONV6]] to i32
1829 // CHECK11-NEXT:    [[TMP54:%.*]] = load i64, i64* getelementptr inbounds ([[STRUCT_SS_0]], %struct.SS.0* @Gblb, i32 0, i32 0), align 4
1830 // CHECK11-NEXT:    [[TMP55:%.*]] = trunc i64 [[TMP54]] to i32
1831 // CHECK11-NEXT:    [[KERNEL_ARGS7:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
1832 // CHECK11-NEXT:    [[TMP56:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS7]], i32 0, i32 0
1833 // CHECK11-NEXT:    store i32 1, i32* [[TMP56]], align 4
1834 // CHECK11-NEXT:    [[TMP57:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS7]], i32 0, i32 1
1835 // CHECK11-NEXT:    store i32 3, i32* [[TMP57]], align 4
1836 // CHECK11-NEXT:    [[TMP58:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS7]], i32 0, i32 2
1837 // CHECK11-NEXT:    store i8** [[TMP50]], i8*** [[TMP58]], align 4
1838 // CHECK11-NEXT:    [[TMP59:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS7]], i32 0, i32 3
1839 // CHECK11-NEXT:    store i8** [[TMP51]], i8*** [[TMP59]], align 4
1840 // CHECK11-NEXT:    [[TMP60:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS7]], i32 0, i32 4
1841 // CHECK11-NEXT:    store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.2, i32 0, i32 0), i64** [[TMP60]], align 4
1842 // CHECK11-NEXT:    [[TMP61:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS7]], i32 0, i32 5
1843 // CHECK11-NEXT:    store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.3, i32 0, i32 0), i64** [[TMP61]], align 4
1844 // CHECK11-NEXT:    [[TMP62:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS7]], i32 0, i32 6
1845 // CHECK11-NEXT:    store i8** null, i8*** [[TMP62]], align 4
1846 // CHECK11-NEXT:    [[TMP63:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS7]], i32 0, i32 7
1847 // CHECK11-NEXT:    store i8** null, i8*** [[TMP63]], align 4
1848 // CHECK11-NEXT:    [[TMP64:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS7]], i32 0, i32 8
1849 // CHECK11-NEXT:    store i64 0, i64* [[TMP64]], align 8
1850 // CHECK11-NEXT:    [[TMP65:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 [[TMP53]], i32 [[TMP55]], i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18teams_template_argv_l125.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS7]])
1851 // CHECK11-NEXT:    [[TMP66:%.*]] = icmp ne i32 [[TMP65]], 0
1852 // CHECK11-NEXT:    br i1 [[TMP66]], label [[OMP_OFFLOAD_FAILED8:%.*]], label [[OMP_OFFLOAD_CONT9:%.*]]
1853 // CHECK11:       omp_offload.failed8:
1854 // CHECK11-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18teams_template_argv_l125(%struct.SS.0* [[LB]], %struct.SS.0* @Gblb, i32 [[TMP34]]) #[[ATTR2]]
1855 // CHECK11-NEXT:    br label [[OMP_OFFLOAD_CONT9]]
1856 // CHECK11:       omp_offload.cont9:
1857 // CHECK11-NEXT:    [[TMP67:%.*]] = load i32, i32* [[COMP]], align 4
1858 // CHECK11-NEXT:    ret i32 [[TMP67]]
1859 //
1860 //
1861 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18teams_template_argv_l116
1862 // CHECK11-SAME: (%struct.SS* noundef nonnull align 4 dereferenceable(8) [[GBLA:%.*]], %struct.SS* noundef nonnull align 4 dereferenceable(8) [[LA:%.*]], i32 noundef [[COMP:%.*]]) #[[ATTR1:[0-9]+]] {
1863 // CHECK11-NEXT:  entry:
1864 // CHECK11-NEXT:    [[GBLA_ADDR:%.*]] = alloca %struct.SS*, align 4
1865 // CHECK11-NEXT:    [[LA_ADDR:%.*]] = alloca %struct.SS*, align 4
1866 // CHECK11-NEXT:    [[COMP_ADDR:%.*]] = alloca i32, align 4
1867 // CHECK11-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
1868 // CHECK11-NEXT:    store %struct.SS* [[GBLA]], %struct.SS** [[GBLA_ADDR]], align 4
1869 // CHECK11-NEXT:    store %struct.SS* [[LA]], %struct.SS** [[LA_ADDR]], align 4
1870 // CHECK11-NEXT:    store i32 [[COMP]], i32* [[COMP_ADDR]], align 4
1871 // CHECK11-NEXT:    [[TMP1:%.*]] = load %struct.SS*, %struct.SS** [[GBLA_ADDR]], align 4
1872 // CHECK11-NEXT:    [[TMP2:%.*]] = load %struct.SS*, %struct.SS** [[LA_ADDR]], align 4
1873 // CHECK11-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP1]], i32 0, i32 0
1874 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
1875 // CHECK11-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[TMP2]], i32 0, i32 1
1876 // CHECK11-NEXT:    [[TMP4:%.*]] = load float, float* [[B]], align 4
1877 // CHECK11-NEXT:    [[CONV:%.*]] = fptosi float [[TMP4]] to i64
1878 // CHECK11-NEXT:    [[TMP5:%.*]] = trunc i64 [[CONV]] to i32
1879 // CHECK11-NEXT:    call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP3]], i32 [[TMP5]])
1880 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[COMP_ADDR]])
1881 // CHECK11-NEXT:    ret void
1882 //
1883 //
1884 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined.
1885 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[COMP:%.*]]) #[[ATTR1]] {
1886 // CHECK11-NEXT:  entry:
1887 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
1888 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
1889 // CHECK11-NEXT:    [[COMP_ADDR:%.*]] = alloca i32*, align 4
1890 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
1891 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
1892 // CHECK11-NEXT:    store i32* [[COMP]], i32** [[COMP_ADDR]], align 4
1893 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[COMP_ADDR]], align 4
1894 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
1895 // CHECK11-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP1]], 1
1896 // CHECK11-NEXT:    store i32 [[INC]], i32* [[TMP0]], align 4
1897 // CHECK11-NEXT:    ret void
1898 //
1899 //
1900 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18teams_template_argv_l125
1901 // CHECK11-SAME: (%struct.SS.0* noundef nonnull align 4 dereferenceable(12) [[LB:%.*]], %struct.SS.0* noundef nonnull align 4 dereferenceable(12) [[GBLB:%.*]], i32 noundef [[COMP:%.*]]) #[[ATTR1]] {
1902 // CHECK11-NEXT:  entry:
1903 // CHECK11-NEXT:    [[LB_ADDR:%.*]] = alloca %struct.SS.0*, align 4
1904 // CHECK11-NEXT:    [[GBLB_ADDR:%.*]] = alloca %struct.SS.0*, align 4
1905 // CHECK11-NEXT:    [[COMP_ADDR:%.*]] = alloca i32, align 4
1906 // CHECK11-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
1907 // CHECK11-NEXT:    store %struct.SS.0* [[LB]], %struct.SS.0** [[LB_ADDR]], align 4
1908 // CHECK11-NEXT:    store %struct.SS.0* [[GBLB]], %struct.SS.0** [[GBLB_ADDR]], align 4
1909 // CHECK11-NEXT:    store i32 [[COMP]], i32* [[COMP_ADDR]], align 4
1910 // CHECK11-NEXT:    [[TMP1:%.*]] = load %struct.SS.0*, %struct.SS.0** [[LB_ADDR]], align 4
1911 // CHECK11-NEXT:    [[TMP2:%.*]] = load %struct.SS.0*, %struct.SS.0** [[GBLB_ADDR]], align 4
1912 // CHECK11-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS_0:%.*]], %struct.SS.0* [[TMP1]], i32 0, i32 1
1913 // CHECK11-NEXT:    [[TMP3:%.*]] = load float, float* [[B]], align 4
1914 // CHECK11-NEXT:    [[CONV:%.*]] = fptosi float [[TMP3]] to i64
1915 // CHECK11-NEXT:    [[TMP4:%.*]] = trunc i64 [[CONV]] to i32
1916 // CHECK11-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS_0]], %struct.SS.0* [[TMP2]], i32 0, i32 0
1917 // CHECK11-NEXT:    [[TMP5:%.*]] = load i64, i64* [[A]], align 4
1918 // CHECK11-NEXT:    [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
1919 // CHECK11-NEXT:    call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP4]], i32 [[TMP6]])
1920 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32* [[COMP_ADDR]])
1921 // CHECK11-NEXT:    ret void
1922 //
1923 //
1924 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..1
1925 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[COMP:%.*]]) #[[ATTR1]] {
1926 // CHECK11-NEXT:  entry:
1927 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
1928 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
1929 // CHECK11-NEXT:    [[COMP_ADDR:%.*]] = alloca i32*, align 4
1930 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
1931 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
1932 // CHECK11-NEXT:    store i32* [[COMP]], i32** [[COMP_ADDR]], align 4
1933 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[COMP_ADDR]], align 4
1934 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
1935 // CHECK11-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP1]], 1
1936 // CHECK11-NEXT:    store i32 [[INC]], i32* [[TMP0]], align 4
1937 // CHECK11-NEXT:    ret void
1938 //
1939 //
1940 // CHECK11-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
1941 // CHECK11-SAME: () #[[ATTR3:[0-9]+]] {
1942 // CHECK11-NEXT:  entry:
1943 // CHECK11-NEXT:    call void @__tgt_register_requires(i64 1)
1944 // CHECK11-NEXT:    ret void
1945 //
1946 //
1947 // CHECK17-LABEL: define {{[^@]+}}@_Z21teams_template_structv
1948 // CHECK17-SAME: () #[[ATTR0:[0-9]+]] {
1949 // CHECK17-NEXT:  entry:
1950 // CHECK17-NEXT:    [[V:%.*]] = alloca [[STRUCT_SS:%.*]], align 4
1951 // CHECK17-NEXT:    [[CALL:%.*]] = call noundef signext i32 @_ZN2SSIiLi123ELx456EE3fooEv(%struct.SS* noundef nonnull align 4 dereferenceable(8) [[V]])
1952 // CHECK17-NEXT:    ret i32 [[CALL]]
1953 //
1954 //
1955 // CHECK17-LABEL: define {{[^@]+}}@_ZN2SSIiLi123ELx456EE3fooEv
1956 // CHECK17-SAME: (%struct.SS* noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) #[[ATTR0]] comdat align 2 {
1957 // CHECK17-NEXT:  entry:
1958 // CHECK17-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
1959 // CHECK17-NEXT:    [[COMP:%.*]] = alloca i32, align 4
1960 // CHECK17-NEXT:    [[COMP_CASTED:%.*]] = alloca i64, align 8
1961 // CHECK17-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x i8*], align 8
1962 // CHECK17-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x i8*], align 8
1963 // CHECK17-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x i8*], align 8
1964 // CHECK17-NEXT:    [[COMP_CASTED3:%.*]] = alloca i64, align 8
1965 // CHECK17-NEXT:    [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [2 x i8*], align 8
1966 // CHECK17-NEXT:    [[DOTOFFLOAD_PTRS6:%.*]] = alloca [2 x i8*], align 8
1967 // CHECK17-NEXT:    [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [2 x i8*], align 8
1968 // CHECK17-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
1969 // CHECK17-NEXT:    [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
1970 // CHECK17-NEXT:    store i32 1, i32* [[COMP]], align 4
1971 // CHECK17-NEXT:    [[TMP0:%.*]] = load i32, i32* [[COMP]], align 4
1972 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[COMP_CASTED]] to i32*
1973 // CHECK17-NEXT:    store i32 [[TMP0]], i32* [[CONV]], align 4
1974 // CHECK17-NEXT:    [[TMP1:%.*]] = load i64, i64* [[COMP_CASTED]], align 8
1975 // CHECK17-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[THIS1]], i32 0, i32 0
1976 // CHECK17-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1977 // CHECK17-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to %struct.SS**
1978 // CHECK17-NEXT:    store %struct.SS* [[THIS1]], %struct.SS** [[TMP3]], align 8
1979 // CHECK17-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1980 // CHECK17-NEXT:    [[TMP5:%.*]] = bitcast i8** [[TMP4]] to i32**
1981 // CHECK17-NEXT:    store i32* [[A]], i32** [[TMP5]], align 8
1982 // CHECK17-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
1983 // CHECK17-NEXT:    store i8* null, i8** [[TMP6]], align 8
1984 // CHECK17-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
1985 // CHECK17-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64*
1986 // CHECK17-NEXT:    store i64 [[TMP1]], i64* [[TMP8]], align 8
1987 // CHECK17-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
1988 // CHECK17-NEXT:    [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i64*
1989 // CHECK17-NEXT:    store i64 [[TMP1]], i64* [[TMP10]], align 8
1990 // CHECK17-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
1991 // CHECK17-NEXT:    store i8* null, i8** [[TMP11]], align 8
1992 // CHECK17-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1993 // CHECK17-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1994 // CHECK17-NEXT:    [[A2:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
1995 // CHECK17-NEXT:    [[TMP14:%.*]] = load i32, i32* [[A2]], align 4
1996 // CHECK17-NEXT:    [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
1997 // CHECK17-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
1998 // CHECK17-NEXT:    store i32 1, i32* [[TMP15]], align 4
1999 // CHECK17-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
2000 // CHECK17-NEXT:    store i32 2, i32* [[TMP16]], align 4
2001 // CHECK17-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
2002 // CHECK17-NEXT:    store i8** [[TMP12]], i8*** [[TMP17]], align 8
2003 // CHECK17-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
2004 // CHECK17-NEXT:    store i8** [[TMP13]], i8*** [[TMP18]], align 8
2005 // CHECK17-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
2006 // CHECK17-NEXT:    store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes, i32 0, i32 0), i64** [[TMP19]], align 8
2007 // CHECK17-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
2008 // CHECK17-NEXT:    store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes, i32 0, i32 0), i64** [[TMP20]], align 8
2009 // CHECK17-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
2010 // CHECK17-NEXT:    store i8** null, i8*** [[TMP21]], align 8
2011 // CHECK17-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
2012 // CHECK17-NEXT:    store i8** null, i8*** [[TMP22]], align 8
2013 // CHECK17-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
2014 // CHECK17-NEXT:    store i64 0, i64* [[TMP23]], align 8
2015 // CHECK17-NEXT:    [[TMP24:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i32 [[TMP14]], i32 123, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l161.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
2016 // CHECK17-NEXT:    [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
2017 // CHECK17-NEXT:    br i1 [[TMP25]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
2018 // CHECK17:       omp_offload.failed:
2019 // CHECK17-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l161(%struct.SS* [[THIS1]], i64 [[TMP1]]) #[[ATTR2:[0-9]+]]
2020 // CHECK17-NEXT:    br label [[OMP_OFFLOAD_CONT]]
2021 // CHECK17:       omp_offload.cont:
2022 // CHECK17-NEXT:    [[TMP26:%.*]] = load i32, i32* [[COMP]], align 4
2023 // CHECK17-NEXT:    [[CONV4:%.*]] = bitcast i64* [[COMP_CASTED3]] to i32*
2024 // CHECK17-NEXT:    store i32 [[TMP26]], i32* [[CONV4]], align 4
2025 // CHECK17-NEXT:    [[TMP27:%.*]] = load i64, i64* [[COMP_CASTED3]], align 8
2026 // CHECK17-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 1
2027 // CHECK17-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
2028 // CHECK17-NEXT:    [[TMP29:%.*]] = bitcast i8** [[TMP28]] to %struct.SS**
2029 // CHECK17-NEXT:    store %struct.SS* [[THIS1]], %struct.SS** [[TMP29]], align 8
2030 // CHECK17-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
2031 // CHECK17-NEXT:    [[TMP31:%.*]] = bitcast i8** [[TMP30]] to float**
2032 // CHECK17-NEXT:    store float* [[B]], float** [[TMP31]], align 8
2033 // CHECK17-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0
2034 // CHECK17-NEXT:    store i8* null, i8** [[TMP32]], align 8
2035 // CHECK17-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1
2036 // CHECK17-NEXT:    [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i64*
2037 // CHECK17-NEXT:    store i64 [[TMP27]], i64* [[TMP34]], align 8
2038 // CHECK17-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1
2039 // CHECK17-NEXT:    [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i64*
2040 // CHECK17-NEXT:    store i64 [[TMP27]], i64* [[TMP36]], align 8
2041 // CHECK17-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1
2042 // CHECK17-NEXT:    store i8* null, i8** [[TMP37]], align 8
2043 // CHECK17-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
2044 // CHECK17-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
2045 // CHECK17-NEXT:    [[B8:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 1
2046 // CHECK17-NEXT:    [[TMP40:%.*]] = load float, float* [[B8]], align 4
2047 // CHECK17-NEXT:    [[CONV9:%.*]] = fptosi float [[TMP40]] to i32
2048 // CHECK17-NEXT:    [[ADD:%.*]] = add nsw i32 [[CONV9]], 123
2049 // CHECK17-NEXT:    [[KERNEL_ARGS10:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
2050 // CHECK17-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 0
2051 // CHECK17-NEXT:    store i32 1, i32* [[TMP41]], align 4
2052 // CHECK17-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 1
2053 // CHECK17-NEXT:    store i32 2, i32* [[TMP42]], align 4
2054 // CHECK17-NEXT:    [[TMP43:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 2
2055 // CHECK17-NEXT:    store i8** [[TMP38]], i8*** [[TMP43]], align 8
2056 // CHECK17-NEXT:    [[TMP44:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 3
2057 // CHECK17-NEXT:    store i8** [[TMP39]], i8*** [[TMP44]], align 8
2058 // CHECK17-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 4
2059 // CHECK17-NEXT:    store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.2, i32 0, i32 0), i64** [[TMP45]], align 8
2060 // CHECK17-NEXT:    [[TMP46:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 5
2061 // CHECK17-NEXT:    store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.3, i32 0, i32 0), i64** [[TMP46]], align 8
2062 // CHECK17-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 6
2063 // CHECK17-NEXT:    store i8** null, i8*** [[TMP47]], align 8
2064 // CHECK17-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 7
2065 // CHECK17-NEXT:    store i8** null, i8*** [[TMP48]], align 8
2066 // CHECK17-NEXT:    [[TMP49:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 8
2067 // CHECK17-NEXT:    store i64 0, i64* [[TMP49]], align 8
2068 // CHECK17-NEXT:    [[TMP50:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 456, i32 [[ADD]], i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l169.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]])
2069 // CHECK17-NEXT:    [[TMP51:%.*]] = icmp ne i32 [[TMP50]], 0
2070 // CHECK17-NEXT:    br i1 [[TMP51]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]]
2071 // CHECK17:       omp_offload.failed11:
2072 // CHECK17-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l169(%struct.SS* [[THIS1]], i64 [[TMP27]]) #[[ATTR2]]
2073 // CHECK17-NEXT:    br label [[OMP_OFFLOAD_CONT12]]
2074 // CHECK17:       omp_offload.cont12:
2075 // CHECK17-NEXT:    [[TMP52:%.*]] = load i32, i32* [[COMP]], align 4
2076 // CHECK17-NEXT:    ret i32 [[TMP52]]
2077 //
2078 //
2079 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l161
2080 // CHECK17-SAME: (%struct.SS* noundef [[THIS:%.*]], i64 noundef [[COMP:%.*]]) #[[ATTR1:[0-9]+]] {
2081 // CHECK17-NEXT:  entry:
2082 // CHECK17-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
2083 // CHECK17-NEXT:    [[COMP_ADDR:%.*]] = alloca i64, align 8
2084 // CHECK17-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
2085 // CHECK17-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
2086 // CHECK17-NEXT:    store i64 [[COMP]], i64* [[COMP_ADDR]], align 8
2087 // CHECK17-NEXT:    [[TMP1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
2088 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[COMP_ADDR]] to i32*
2089 // CHECK17-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP1]], i32 0, i32 0
2090 // CHECK17-NEXT:    [[TMP2:%.*]] = load i32, i32* [[A]], align 4
2091 // CHECK17-NEXT:    call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 123)
2092 // CHECK17-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[CONV]])
2093 // CHECK17-NEXT:    ret void
2094 //
2095 //
2096 // CHECK17-LABEL: define {{[^@]+}}@.omp_outlined.
2097 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[COMP:%.*]]) #[[ATTR1]] {
2098 // CHECK17-NEXT:  entry:
2099 // CHECK17-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2100 // CHECK17-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2101 // CHECK17-NEXT:    [[COMP_ADDR:%.*]] = alloca i32*, align 8
2102 // CHECK17-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2103 // CHECK17-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2104 // CHECK17-NEXT:    store i32* [[COMP]], i32** [[COMP_ADDR]], align 8
2105 // CHECK17-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[COMP_ADDR]], align 8
2106 // CHECK17-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
2107 // CHECK17-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP1]], 1
2108 // CHECK17-NEXT:    store i32 [[INC]], i32* [[TMP0]], align 4
2109 // CHECK17-NEXT:    ret void
2110 //
2111 //
2112 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l169
2113 // CHECK17-SAME: (%struct.SS* noundef [[THIS:%.*]], i64 noundef [[COMP:%.*]]) #[[ATTR3:[0-9]+]] {
2114 // CHECK17-NEXT:  entry:
2115 // CHECK17-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
2116 // CHECK17-NEXT:    [[COMP_ADDR:%.*]] = alloca i64, align 8
2117 // CHECK17-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
2118 // CHECK17-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
2119 // CHECK17-NEXT:    store i64 [[COMP]], i64* [[COMP_ADDR]], align 8
2120 // CHECK17-NEXT:    [[TMP1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
2121 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[COMP_ADDR]] to i32*
2122 // CHECK17-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP1]], i32 0, i32 1
2123 // CHECK17-NEXT:    [[TMP2:%.*]] = load float, float* [[B]], align 4
2124 // CHECK17-NEXT:    [[CONV1:%.*]] = fptosi float [[TMP2]] to i32
2125 // CHECK17-NEXT:    [[ADD:%.*]] = add nsw i32 [[CONV1]], 123
2126 // CHECK17-NEXT:    call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 456, i32 [[ADD]])
2127 // CHECK17-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32* [[CONV]])
2128 // CHECK17-NEXT:    ret void
2129 //
2130 //
2131 // CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..1
2132 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[COMP:%.*]]) #[[ATTR1]] {
2133 // CHECK17-NEXT:  entry:
2134 // CHECK17-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2135 // CHECK17-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2136 // CHECK17-NEXT:    [[COMP_ADDR:%.*]] = alloca i32*, align 8
2137 // CHECK17-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2138 // CHECK17-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2139 // CHECK17-NEXT:    store i32* [[COMP]], i32** [[COMP_ADDR]], align 8
2140 // CHECK17-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[COMP_ADDR]], align 8
2141 // CHECK17-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
2142 // CHECK17-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP1]], 1
2143 // CHECK17-NEXT:    store i32 [[INC]], i32* [[TMP0]], align 4
2144 // CHECK17-NEXT:    ret void
2145 //
2146 //
2147 // CHECK17-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
2148 // CHECK17-SAME: () #[[ATTR4:[0-9]+]] {
2149 // CHECK17-NEXT:  entry:
2150 // CHECK17-NEXT:    call void @__tgt_register_requires(i64 1)
2151 // CHECK17-NEXT:    ret void
2152 //
2153 //
2154 // CHECK19-LABEL: define {{[^@]+}}@_Z21teams_template_structv
2155 // CHECK19-SAME: () #[[ATTR0:[0-9]+]] {
2156 // CHECK19-NEXT:  entry:
2157 // CHECK19-NEXT:    [[V:%.*]] = alloca [[STRUCT_SS:%.*]], align 4
2158 // CHECK19-NEXT:    [[CALL:%.*]] = call noundef i32 @_ZN2SSIiLi123ELx456EE3fooEv(%struct.SS* noundef nonnull align 4 dereferenceable(8) [[V]])
2159 // CHECK19-NEXT:    ret i32 [[CALL]]
2160 //
2161 //
2162 // CHECK19-LABEL: define {{[^@]+}}@_ZN2SSIiLi123ELx456EE3fooEv
2163 // CHECK19-SAME: (%struct.SS* noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) #[[ATTR0]] comdat align 2 {
2164 // CHECK19-NEXT:  entry:
2165 // CHECK19-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
2166 // CHECK19-NEXT:    [[COMP:%.*]] = alloca i32, align 4
2167 // CHECK19-NEXT:    [[COMP_CASTED:%.*]] = alloca i32, align 4
2168 // CHECK19-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x i8*], align 4
2169 // CHECK19-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x i8*], align 4
2170 // CHECK19-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x i8*], align 4
2171 // CHECK19-NEXT:    [[COMP_CASTED3:%.*]] = alloca i32, align 4
2172 // CHECK19-NEXT:    [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [2 x i8*], align 4
2173 // CHECK19-NEXT:    [[DOTOFFLOAD_PTRS5:%.*]] = alloca [2 x i8*], align 4
2174 // CHECK19-NEXT:    [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [2 x i8*], align 4
2175 // CHECK19-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
2176 // CHECK19-NEXT:    [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
2177 // CHECK19-NEXT:    store i32 1, i32* [[COMP]], align 4
2178 // CHECK19-NEXT:    [[TMP0:%.*]] = load i32, i32* [[COMP]], align 4
2179 // CHECK19-NEXT:    store i32 [[TMP0]], i32* [[COMP_CASTED]], align 4
2180 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32, i32* [[COMP_CASTED]], align 4
2181 // CHECK19-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[THIS1]], i32 0, i32 0
2182 // CHECK19-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
2183 // CHECK19-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to %struct.SS**
2184 // CHECK19-NEXT:    store %struct.SS* [[THIS1]], %struct.SS** [[TMP3]], align 4
2185 // CHECK19-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
2186 // CHECK19-NEXT:    [[TMP5:%.*]] = bitcast i8** [[TMP4]] to i32**
2187 // CHECK19-NEXT:    store i32* [[A]], i32** [[TMP5]], align 4
2188 // CHECK19-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
2189 // CHECK19-NEXT:    store i8* null, i8** [[TMP6]], align 4
2190 // CHECK19-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
2191 // CHECK19-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32*
2192 // CHECK19-NEXT:    store i32 [[TMP1]], i32* [[TMP8]], align 4
2193 // CHECK19-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
2194 // CHECK19-NEXT:    [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i32*
2195 // CHECK19-NEXT:    store i32 [[TMP1]], i32* [[TMP10]], align 4
2196 // CHECK19-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
2197 // CHECK19-NEXT:    store i8* null, i8** [[TMP11]], align 4
2198 // CHECK19-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
2199 // CHECK19-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
2200 // CHECK19-NEXT:    [[A2:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
2201 // CHECK19-NEXT:    [[TMP14:%.*]] = load i32, i32* [[A2]], align 4
2202 // CHECK19-NEXT:    [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
2203 // CHECK19-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
2204 // CHECK19-NEXT:    store i32 1, i32* [[TMP15]], align 4
2205 // CHECK19-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
2206 // CHECK19-NEXT:    store i32 2, i32* [[TMP16]], align 4
2207 // CHECK19-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
2208 // CHECK19-NEXT:    store i8** [[TMP12]], i8*** [[TMP17]], align 4
2209 // CHECK19-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
2210 // CHECK19-NEXT:    store i8** [[TMP13]], i8*** [[TMP18]], align 4
2211 // CHECK19-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
2212 // CHECK19-NEXT:    store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes, i32 0, i32 0), i64** [[TMP19]], align 4
2213 // CHECK19-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
2214 // CHECK19-NEXT:    store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes, i32 0, i32 0), i64** [[TMP20]], align 4
2215 // CHECK19-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
2216 // CHECK19-NEXT:    store i8** null, i8*** [[TMP21]], align 4
2217 // CHECK19-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
2218 // CHECK19-NEXT:    store i8** null, i8*** [[TMP22]], align 4
2219 // CHECK19-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
2220 // CHECK19-NEXT:    store i64 0, i64* [[TMP23]], align 8
2221 // CHECK19-NEXT:    [[TMP24:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i32 [[TMP14]], i32 123, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l161.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
2222 // CHECK19-NEXT:    [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
2223 // CHECK19-NEXT:    br i1 [[TMP25]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
2224 // CHECK19:       omp_offload.failed:
2225 // CHECK19-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l161(%struct.SS* [[THIS1]], i32 [[TMP1]]) #[[ATTR2:[0-9]+]]
2226 // CHECK19-NEXT:    br label [[OMP_OFFLOAD_CONT]]
2227 // CHECK19:       omp_offload.cont:
2228 // CHECK19-NEXT:    [[TMP26:%.*]] = load i32, i32* [[COMP]], align 4
2229 // CHECK19-NEXT:    store i32 [[TMP26]], i32* [[COMP_CASTED3]], align 4
2230 // CHECK19-NEXT:    [[TMP27:%.*]] = load i32, i32* [[COMP_CASTED3]], align 4
2231 // CHECK19-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 1
2232 // CHECK19-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
2233 // CHECK19-NEXT:    [[TMP29:%.*]] = bitcast i8** [[TMP28]] to %struct.SS**
2234 // CHECK19-NEXT:    store %struct.SS* [[THIS1]], %struct.SS** [[TMP29]], align 4
2235 // CHECK19-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
2236 // CHECK19-NEXT:    [[TMP31:%.*]] = bitcast i8** [[TMP30]] to float**
2237 // CHECK19-NEXT:    store float* [[B]], float** [[TMP31]], align 4
2238 // CHECK19-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0
2239 // CHECK19-NEXT:    store i8* null, i8** [[TMP32]], align 4
2240 // CHECK19-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1
2241 // CHECK19-NEXT:    [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32*
2242 // CHECK19-NEXT:    store i32 [[TMP27]], i32* [[TMP34]], align 4
2243 // CHECK19-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1
2244 // CHECK19-NEXT:    [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32*
2245 // CHECK19-NEXT:    store i32 [[TMP27]], i32* [[TMP36]], align 4
2246 // CHECK19-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1
2247 // CHECK19-NEXT:    store i8* null, i8** [[TMP37]], align 4
2248 // CHECK19-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
2249 // CHECK19-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
2250 // CHECK19-NEXT:    [[B7:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 1
2251 // CHECK19-NEXT:    [[TMP40:%.*]] = load float, float* [[B7]], align 4
2252 // CHECK19-NEXT:    [[CONV:%.*]] = fptosi float [[TMP40]] to i32
2253 // CHECK19-NEXT:    [[ADD:%.*]] = add nsw i32 [[CONV]], 123
2254 // CHECK19-NEXT:    [[KERNEL_ARGS8:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
2255 // CHECK19-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS8]], i32 0, i32 0
2256 // CHECK19-NEXT:    store i32 1, i32* [[TMP41]], align 4
2257 // CHECK19-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS8]], i32 0, i32 1
2258 // CHECK19-NEXT:    store i32 2, i32* [[TMP42]], align 4
2259 // CHECK19-NEXT:    [[TMP43:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS8]], i32 0, i32 2
2260 // CHECK19-NEXT:    store i8** [[TMP38]], i8*** [[TMP43]], align 4
2261 // CHECK19-NEXT:    [[TMP44:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS8]], i32 0, i32 3
2262 // CHECK19-NEXT:    store i8** [[TMP39]], i8*** [[TMP44]], align 4
2263 // CHECK19-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS8]], i32 0, i32 4
2264 // CHECK19-NEXT:    store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.2, i32 0, i32 0), i64** [[TMP45]], align 4
2265 // CHECK19-NEXT:    [[TMP46:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS8]], i32 0, i32 5
2266 // CHECK19-NEXT:    store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.3, i32 0, i32 0), i64** [[TMP46]], align 4
2267 // CHECK19-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS8]], i32 0, i32 6
2268 // CHECK19-NEXT:    store i8** null, i8*** [[TMP47]], align 4
2269 // CHECK19-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS8]], i32 0, i32 7
2270 // CHECK19-NEXT:    store i8** null, i8*** [[TMP48]], align 4
2271 // CHECK19-NEXT:    [[TMP49:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS8]], i32 0, i32 8
2272 // CHECK19-NEXT:    store i64 0, i64* [[TMP49]], align 8
2273 // CHECK19-NEXT:    [[TMP50:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 456, i32 [[ADD]], i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l169.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS8]])
2274 // CHECK19-NEXT:    [[TMP51:%.*]] = icmp ne i32 [[TMP50]], 0
2275 // CHECK19-NEXT:    br i1 [[TMP51]], label [[OMP_OFFLOAD_FAILED9:%.*]], label [[OMP_OFFLOAD_CONT10:%.*]]
2276 // CHECK19:       omp_offload.failed9:
2277 // CHECK19-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l169(%struct.SS* [[THIS1]], i32 [[TMP27]]) #[[ATTR2]]
2278 // CHECK19-NEXT:    br label [[OMP_OFFLOAD_CONT10]]
2279 // CHECK19:       omp_offload.cont10:
2280 // CHECK19-NEXT:    [[TMP52:%.*]] = load i32, i32* [[COMP]], align 4
2281 // CHECK19-NEXT:    ret i32 [[TMP52]]
2282 //
2283 //
2284 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l161
2285 // CHECK19-SAME: (%struct.SS* noundef [[THIS:%.*]], i32 noundef [[COMP:%.*]]) #[[ATTR1:[0-9]+]] {
2286 // CHECK19-NEXT:  entry:
2287 // CHECK19-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
2288 // CHECK19-NEXT:    [[COMP_ADDR:%.*]] = alloca i32, align 4
2289 // CHECK19-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
2290 // CHECK19-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
2291 // CHECK19-NEXT:    store i32 [[COMP]], i32* [[COMP_ADDR]], align 4
2292 // CHECK19-NEXT:    [[TMP1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
2293 // CHECK19-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP1]], i32 0, i32 0
2294 // CHECK19-NEXT:    [[TMP2:%.*]] = load i32, i32* [[A]], align 4
2295 // CHECK19-NEXT:    call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 123)
2296 // CHECK19-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[COMP_ADDR]])
2297 // CHECK19-NEXT:    ret void
2298 //
2299 //
2300 // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined.
2301 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[COMP:%.*]]) #[[ATTR1]] {
2302 // CHECK19-NEXT:  entry:
2303 // CHECK19-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
2304 // CHECK19-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
2305 // CHECK19-NEXT:    [[COMP_ADDR:%.*]] = alloca i32*, align 4
2306 // CHECK19-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
2307 // CHECK19-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
2308 // CHECK19-NEXT:    store i32* [[COMP]], i32** [[COMP_ADDR]], align 4
2309 // CHECK19-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[COMP_ADDR]], align 4
2310 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
2311 // CHECK19-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP1]], 1
2312 // CHECK19-NEXT:    store i32 [[INC]], i32* [[TMP0]], align 4
2313 // CHECK19-NEXT:    ret void
2314 //
2315 //
2316 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l169
2317 // CHECK19-SAME: (%struct.SS* noundef [[THIS:%.*]], i32 noundef [[COMP:%.*]]) #[[ATTR3:[0-9]+]] {
2318 // CHECK19-NEXT:  entry:
2319 // CHECK19-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
2320 // CHECK19-NEXT:    [[COMP_ADDR:%.*]] = alloca i32, align 4
2321 // CHECK19-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
2322 // CHECK19-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
2323 // CHECK19-NEXT:    store i32 [[COMP]], i32* [[COMP_ADDR]], align 4
2324 // CHECK19-NEXT:    [[TMP1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
2325 // CHECK19-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP1]], i32 0, i32 1
2326 // CHECK19-NEXT:    [[TMP2:%.*]] = load float, float* [[B]], align 4
2327 // CHECK19-NEXT:    [[CONV:%.*]] = fptosi float [[TMP2]] to i32
2328 // CHECK19-NEXT:    [[ADD:%.*]] = add nsw i32 [[CONV]], 123
2329 // CHECK19-NEXT:    call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 456, i32 [[ADD]])
2330 // CHECK19-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32* [[COMP_ADDR]])
2331 // CHECK19-NEXT:    ret void
2332 //
2333 //
2334 // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..1
2335 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[COMP:%.*]]) #[[ATTR1]] {
2336 // CHECK19-NEXT:  entry:
2337 // CHECK19-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
2338 // CHECK19-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
2339 // CHECK19-NEXT:    [[COMP_ADDR:%.*]] = alloca i32*, align 4
2340 // CHECK19-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
2341 // CHECK19-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
2342 // CHECK19-NEXT:    store i32* [[COMP]], i32** [[COMP_ADDR]], align 4
2343 // CHECK19-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[COMP_ADDR]], align 4
2344 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
2345 // CHECK19-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP1]], 1
2346 // CHECK19-NEXT:    store i32 [[INC]], i32* [[TMP0]], align 4
2347 // CHECK19-NEXT:    ret void
2348 //
2349 //
2350 // CHECK19-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
2351 // CHECK19-SAME: () #[[ATTR4:[0-9]+]] {
2352 // CHECK19-NEXT:  entry:
2353 // CHECK19-NEXT:    call void @__tgt_register_requires(i64 1)
2354 // CHECK19-NEXT:    ret void
2355 //
2356 //
2357 // CHECK25-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l216
2358 // CHECK25-SAME: (i64 noundef [[ARGC:%.*]]) #[[ATTR0:[0-9]+]] {
2359 // CHECK25-NEXT:  entry:
2360 // CHECK25-NEXT:    [[ARGC_ADDR:%.*]] = alloca i64, align 8
2361 // CHECK25-NEXT:    store i64 [[ARGC]], i64* [[ARGC_ADDR]], align 8
2362 // CHECK25-NEXT:    [[CONV:%.*]] = bitcast i64* [[ARGC_ADDR]] to i32*
2363 // CHECK25-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[CONV]])
2364 // CHECK25-NEXT:    ret void
2365 //
2366 //
2367 // CHECK25-LABEL: define {{[^@]+}}@.omp_outlined.
2368 // CHECK25-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[ARGC:%.*]]) #[[ATTR0]] {
2369 // CHECK25-NEXT:  entry:
2370 // CHECK25-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2371 // CHECK25-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2372 // CHECK25-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32*, align 8
2373 // CHECK25-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2374 // CHECK25-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2375 // CHECK25-NEXT:    store i32* [[ARGC]], i32** [[ARGC_ADDR]], align 8
2376 // CHECK25-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[ARGC_ADDR]], align 8
2377 // CHECK25-NEXT:    store i32 0, i32* [[TMP0]], align 4
2378 // CHECK25-NEXT:    ret void
2379 //
2380 //
2381 // CHECK25-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIPPcEiT__l209
2382 // CHECK25-SAME: (i8** noundef [[ARGC:%.*]]) #[[ATTR0]] {
2383 // CHECK25-NEXT:  entry:
2384 // CHECK25-NEXT:    [[ARGC_ADDR:%.*]] = alloca i8**, align 8
2385 // CHECK25-NEXT:    store i8** [[ARGC]], i8*** [[ARGC_ADDR]], align 8
2386 // CHECK25-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i8***)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i8*** [[ARGC_ADDR]])
2387 // CHECK25-NEXT:    ret void
2388 //
2389 //
2390 // CHECK25-LABEL: define {{[^@]+}}@.omp_outlined..1
2391 // CHECK25-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i8*** noundef nonnull align 8 dereferenceable(8) [[ARGC:%.*]]) #[[ATTR0]] {
2392 // CHECK25-NEXT:  entry:
2393 // CHECK25-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2394 // CHECK25-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2395 // CHECK25-NEXT:    [[ARGC_ADDR:%.*]] = alloca i8***, align 8
2396 // CHECK25-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2397 // CHECK25-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2398 // CHECK25-NEXT:    store i8*** [[ARGC]], i8**** [[ARGC_ADDR]], align 8
2399 // CHECK25-NEXT:    [[TMP0:%.*]] = load i8***, i8**** [[ARGC_ADDR]], align 8
2400 // CHECK25-NEXT:    store i8** null, i8*** [[TMP0]], align 8
2401 // CHECK25-NEXT:    ret void
2402 //
2403 //
2404 // CHECK27-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l216
2405 // CHECK27-SAME: (i32 noundef [[ARGC:%.*]]) #[[ATTR0:[0-9]+]] {
2406 // CHECK27-NEXT:  entry:
2407 // CHECK27-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
2408 // CHECK27-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
2409 // CHECK27-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[ARGC_ADDR]])
2410 // CHECK27-NEXT:    ret void
2411 //
2412 //
2413 // CHECK27-LABEL: define {{[^@]+}}@.omp_outlined.
2414 // CHECK27-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[ARGC:%.*]]) #[[ATTR0]] {
2415 // CHECK27-NEXT:  entry:
2416 // CHECK27-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
2417 // CHECK27-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
2418 // CHECK27-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32*, align 4
2419 // CHECK27-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
2420 // CHECK27-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
2421 // CHECK27-NEXT:    store i32* [[ARGC]], i32** [[ARGC_ADDR]], align 4
2422 // CHECK27-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[ARGC_ADDR]], align 4
2423 // CHECK27-NEXT:    store i32 0, i32* [[TMP0]], align 4
2424 // CHECK27-NEXT:    ret void
2425 //
2426 //
2427 // CHECK27-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIPPcEiT__l209
2428 // CHECK27-SAME: (i8** noundef [[ARGC:%.*]]) #[[ATTR0]] {
2429 // CHECK27-NEXT:  entry:
2430 // CHECK27-NEXT:    [[ARGC_ADDR:%.*]] = alloca i8**, align 4
2431 // CHECK27-NEXT:    store i8** [[ARGC]], i8*** [[ARGC_ADDR]], align 4
2432 // CHECK27-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i8***)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i8*** [[ARGC_ADDR]])
2433 // CHECK27-NEXT:    ret void
2434 //
2435 //
2436 // CHECK27-LABEL: define {{[^@]+}}@.omp_outlined..1
2437 // CHECK27-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i8*** noundef nonnull align 4 dereferenceable(4) [[ARGC:%.*]]) #[[ATTR0]] {
2438 // CHECK27-NEXT:  entry:
2439 // CHECK27-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
2440 // CHECK27-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
2441 // CHECK27-NEXT:    [[ARGC_ADDR:%.*]] = alloca i8***, align 4
2442 // CHECK27-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
2443 // CHECK27-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
2444 // CHECK27-NEXT:    store i8*** [[ARGC]], i8**** [[ARGC_ADDR]], align 4
2445 // CHECK27-NEXT:    [[TMP0:%.*]] = load i8***, i8**** [[ARGC_ADDR]], align 4
2446 // CHECK27-NEXT:    store i8** null, i8*** [[TMP0]], align 4
2447 // CHECK27-NEXT:    ret void
2448 //
2449 //
2450 // CHECK33-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l265
2451 // CHECK33-SAME: (i64 noundef [[A:%.*]], i64 noundef [[B:%.*]], i64 noundef [[ARGC:%.*]]) #[[ATTR0:[0-9]+]] {
2452 // CHECK33-NEXT:  entry:
2453 // CHECK33-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
2454 // CHECK33-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
2455 // CHECK33-NEXT:    [[ARGC_ADDR:%.*]] = alloca i64, align 8
2456 // CHECK33-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
2457 // CHECK33-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
2458 // CHECK33-NEXT:    store i64 [[B]], i64* [[B_ADDR]], align 8
2459 // CHECK33-NEXT:    store i64 [[ARGC]], i64* [[ARGC_ADDR]], align 8
2460 // CHECK33-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
2461 // CHECK33-NEXT:    [[CONV1:%.*]] = bitcast i64* [[B_ADDR]] to i32*
2462 // CHECK33-NEXT:    [[CONV2:%.*]] = bitcast i64* [[ARGC_ADDR]] to i32*
2463 // CHECK33-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
2464 // CHECK33-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV1]], align 4
2465 // CHECK33-NEXT:    call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]])
2466 // CHECK33-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[CONV2]])
2467 // CHECK33-NEXT:    ret void
2468 //
2469 //
2470 // CHECK33-LABEL: define {{[^@]+}}@.omp_outlined.
2471 // CHECK33-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[ARGC:%.*]]) #[[ATTR0]] {
2472 // CHECK33-NEXT:  entry:
2473 // CHECK33-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2474 // CHECK33-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2475 // CHECK33-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32*, align 8
2476 // CHECK33-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2477 // CHECK33-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2478 // CHECK33-NEXT:    store i32* [[ARGC]], i32** [[ARGC_ADDR]], align 8
2479 // CHECK33-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[ARGC_ADDR]], align 8
2480 // CHECK33-NEXT:    store i32 0, i32* [[TMP0]], align 4
2481 // CHECK33-NEXT:    ret void
2482 //
2483 //
2484 // CHECK33-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIPPcEiT__l254
2485 // CHECK33-SAME: (i64 noundef [[A:%.*]], i64 noundef [[B:%.*]], i8** noundef [[ARGC:%.*]]) #[[ATTR0]] {
2486 // CHECK33-NEXT:  entry:
2487 // CHECK33-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
2488 // CHECK33-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
2489 // CHECK33-NEXT:    [[ARGC_ADDR:%.*]] = alloca i8**, align 8
2490 // CHECK33-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
2491 // CHECK33-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
2492 // CHECK33-NEXT:    store i64 [[B]], i64* [[B_ADDR]], align 8
2493 // CHECK33-NEXT:    store i8** [[ARGC]], i8*** [[ARGC_ADDR]], align 8
2494 // CHECK33-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
2495 // CHECK33-NEXT:    [[CONV1:%.*]] = bitcast i64* [[B_ADDR]] to i32*
2496 // CHECK33-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
2497 // CHECK33-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV1]], align 4
2498 // CHECK33-NEXT:    call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]])
2499 // CHECK33-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i8***)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i8*** [[ARGC_ADDR]])
2500 // CHECK33-NEXT:    ret void
2501 //
2502 //
2503 // CHECK33-LABEL: define {{[^@]+}}@.omp_outlined..1
2504 // CHECK33-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i8*** noundef nonnull align 8 dereferenceable(8) [[ARGC:%.*]]) #[[ATTR0]] {
2505 // CHECK33-NEXT:  entry:
2506 // CHECK33-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2507 // CHECK33-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2508 // CHECK33-NEXT:    [[ARGC_ADDR:%.*]] = alloca i8***, align 8
2509 // CHECK33-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2510 // CHECK33-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2511 // CHECK33-NEXT:    store i8*** [[ARGC]], i8**** [[ARGC_ADDR]], align 8
2512 // CHECK33-NEXT:    [[TMP0:%.*]] = load i8***, i8**** [[ARGC_ADDR]], align 8
2513 // CHECK33-NEXT:    store i8** null, i8*** [[TMP0]], align 8
2514 // CHECK33-NEXT:    ret void
2515 //
2516 //
2517 // CHECK35-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l265
2518 // CHECK35-SAME: (i32 noundef [[A:%.*]], i32 noundef [[B:%.*]], i32 noundef [[ARGC:%.*]]) #[[ATTR0:[0-9]+]] {
2519 // CHECK35-NEXT:  entry:
2520 // CHECK35-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
2521 // CHECK35-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
2522 // CHECK35-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
2523 // CHECK35-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
2524 // CHECK35-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
2525 // CHECK35-NEXT:    store i32 [[B]], i32* [[B_ADDR]], align 4
2526 // CHECK35-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
2527 // CHECK35-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
2528 // CHECK35-NEXT:    [[TMP2:%.*]] = load i32, i32* [[B_ADDR]], align 4
2529 // CHECK35-NEXT:    call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]])
2530 // CHECK35-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[ARGC_ADDR]])
2531 // CHECK35-NEXT:    ret void
2532 //
2533 //
2534 // CHECK35-LABEL: define {{[^@]+}}@.omp_outlined.
2535 // CHECK35-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[ARGC:%.*]]) #[[ATTR0]] {
2536 // CHECK35-NEXT:  entry:
2537 // CHECK35-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
2538 // CHECK35-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
2539 // CHECK35-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32*, align 4
2540 // CHECK35-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
2541 // CHECK35-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
2542 // CHECK35-NEXT:    store i32* [[ARGC]], i32** [[ARGC_ADDR]], align 4
2543 // CHECK35-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[ARGC_ADDR]], align 4
2544 // CHECK35-NEXT:    store i32 0, i32* [[TMP0]], align 4
2545 // CHECK35-NEXT:    ret void
2546 //
2547 //
2548 // CHECK35-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIPPcEiT__l254
2549 // CHECK35-SAME: (i32 noundef [[A:%.*]], i32 noundef [[B:%.*]], i8** noundef [[ARGC:%.*]]) #[[ATTR0]] {
2550 // CHECK35-NEXT:  entry:
2551 // CHECK35-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
2552 // CHECK35-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
2553 // CHECK35-NEXT:    [[ARGC_ADDR:%.*]] = alloca i8**, align 4
2554 // CHECK35-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
2555 // CHECK35-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
2556 // CHECK35-NEXT:    store i32 [[B]], i32* [[B_ADDR]], align 4
2557 // CHECK35-NEXT:    store i8** [[ARGC]], i8*** [[ARGC_ADDR]], align 4
2558 // CHECK35-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
2559 // CHECK35-NEXT:    [[TMP2:%.*]] = load i32, i32* [[B_ADDR]], align 4
2560 // CHECK35-NEXT:    call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]])
2561 // CHECK35-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i8***)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i8*** [[ARGC_ADDR]])
2562 // CHECK35-NEXT:    ret void
2563 //
2564 //
2565 // CHECK35-LABEL: define {{[^@]+}}@.omp_outlined..1
2566 // CHECK35-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i8*** noundef nonnull align 4 dereferenceable(4) [[ARGC:%.*]]) #[[ATTR0]] {
2567 // CHECK35-NEXT:  entry:
2568 // CHECK35-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
2569 // CHECK35-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
2570 // CHECK35-NEXT:    [[ARGC_ADDR:%.*]] = alloca i8***, align 4
2571 // CHECK35-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
2572 // CHECK35-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
2573 // CHECK35-NEXT:    store i8*** [[ARGC]], i8**** [[ARGC_ADDR]], align 4
2574 // CHECK35-NEXT:    [[TMP0:%.*]] = load i8***, i8**** [[ARGC_ADDR]], align 4
2575 // CHECK35-NEXT:    store i8** null, i8*** [[TMP0]], align 4
2576 // CHECK35-NEXT:    ret void
2577 //
2578 //
2579 // CHECK41-LABEL: define {{[^@]+}}@_Z3foov
2580 // CHECK41-SAME: () #[[ATTR0:[0-9]+]] {
2581 // CHECK41-NEXT:  entry:
2582 // CHECK41-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
2583 // CHECK41-NEXT:    ret void
2584 //
2585 //
2586 // CHECK41-LABEL: define {{[^@]+}}@.omp_outlined.
2587 // CHECK41-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] {
2588 // CHECK41-NEXT:  entry:
2589 // CHECK41-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2590 // CHECK41-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2591 // CHECK41-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2592 // CHECK41-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2593 // CHECK41-NEXT:    ret void
2594 //
2595 //
2596 // CHECK43-LABEL: define {{[^@]+}}@_Z3foov
2597 // CHECK43-SAME: () #[[ATTR0:[0-9]+]] {
2598 // CHECK43-NEXT:  entry:
2599 // CHECK43-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
2600 // CHECK43-NEXT:    ret void
2601 //
2602 //
2603 // CHECK43-LABEL: define {{[^@]+}}@.omp_outlined.
2604 // CHECK43-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] {
2605 // CHECK43-NEXT:  entry:
2606 // CHECK43-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
2607 // CHECK43-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
2608 // CHECK43-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
2609 // CHECK43-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
2610 // CHECK43-NEXT:    ret void
2611 //
2612