1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
2 // Test host codegen.
3 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1
4 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
5 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK1
6 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK3
7 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
8 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK3
9 
10 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
11 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
12 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
13 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
14 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
15 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
16 
17 // Test target codegen - host bc file has to be created first.
18 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc
19 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix=CHECK9
20 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s
21 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK9
22 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc
23 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix=CHECK11
24 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s
25 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK11
26 
27 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc
28 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
29 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s
30 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
31 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc
32 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
33 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s
34 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
35 
36 // Test host codegen.
37 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1
38 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
39 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK1
40 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK3
41 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
42 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK3
43 
44 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
45 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
46 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
47 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
48 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
49 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
50 
51 // Test target codegen - host bc file has to be created first.
52 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc
53 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix=CHECK9
54 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s
55 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK9
56 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc
57 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix=CHECK11
58 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s
59 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK11
60 
61 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc
62 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
63 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s
64 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
65 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc
66 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
67 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s
68 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
69 
70 // expected-no-diagnostics
71 #ifndef HEADER
72 #define HEADER
73 
74 
75 
76 
77 // We have 6 target regions
78 
79 
80 
81 // Check that target registration is emitted as a Ctor.
82 
83 
84 template<typename tx>
85 tx ftemplate(int n) {
86   tx a = 0;
87 
88   #pragma omp target parallel num_threads(tx(20))
89   {
90   }
91 
92   short b = 1;
93   #pragma omp target parallel num_threads(b)
94   {
95     a += b;
96   }
97 
98   return a;
99 }
100 
101 static
102 int fstatic(int n) {
103 
104   #pragma omp target parallel num_threads(n)
105   {
106   }
107 
108   #pragma omp target parallel num_threads(32+n)
109   {
110   }
111 
112   return n+1;
113 }
114 
115 struct S1 {
116   double a;
117 
118   int r1(int n){
119     int b = 1;
120 
121     #pragma omp target parallel num_threads(n-b)
122     {
123       this->a = (double)b + 1.5;
124     }
125 
126     #pragma omp target parallel num_threads(1024)
127     {
128       this->a = 2.5;
129     }
130 
131     return (int)a;
132   }
133 };
134 
135 int bar(int n){
136   int a = 0;
137 
138   S1 S;
139   a += S.r1(n);
140 
141   a += fstatic(n);
142 
143   a += ftemplate<int>(n);
144 
145   return a;
146 }
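
// (Reader note, not matched by FileCheck.) A rough summary of the host-side lowering that the
// CHECK1/CHECK3 assertions below exercise, stated only from those assertions: the num_threads
// expression is evaluated at the target construct, captured into a ".capture_expr." temporary,
// and passed as the trailing thread-limit argument of the __tgt_target_teams_mapper call for the
// corresponding region (a literal 1024 or 20 for the constant clauses, the loaded capture for
// num_threads(n), num_threads(32+n), and num_threads(n-b)). The fallback stub emitted for each
// region (@__omp_offloading_..._l<line>) then calls __kmpc_push_num_threads with the same value
// before forking the outlined parallel body via __kmpc_fork_call.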
147 
148 
149 
150 
151 
152 
153 
154 
155 
156 
157 
158 
159 
160 
161 
162 
163 
164 
165 
166 
167 
168 // Check that the offloading functions are emitted and that the parallel function
169 // is appropriately guarded.
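
// (Reader note, not matched by FileCheck.) One form of guarding visible in the host IR below:
// the i32 returned by __tgt_target_teams_mapper is tested with 'icmp ne ..., 0', and a non-zero
// (failed) result branches to an omp_offload.failed block that invokes the host fallback entry
// for the region before rejoining at omp_offload.cont. Device-side behavior is covered by the
// CHECK9/CHECK11 prefixes from the RUN lines above and does not appear in this excerpt.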
170 
171 
172 
173 
174 
175 
176 
177 
178 
179 
180 
181 
182 
183 
184 
185 
186 
187 
188 
189 
190 
191 #endif
192 // CHECK1-LABEL: define {{[^@]+}}@_Z3bari
193 // CHECK1-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0:[0-9]+]] {
194 // CHECK1-NEXT:  entry:
195 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
196 // CHECK1-NEXT:    [[A:%.*]] = alloca i32, align 4
197 // CHECK1-NEXT:    [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8
198 // CHECK1-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
199 // CHECK1-NEXT:    store i32 0, i32* [[A]], align 4
200 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
201 // CHECK1-NEXT:    [[CALL:%.*]] = call noundef signext i32 @_ZN2S12r1Ei(%struct.S1* noundef nonnull align 8 dereferenceable(8) [[S]], i32 noundef signext [[TMP0]])
202 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 4
203 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
204 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
205 // CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
206 // CHECK1-NEXT:    [[CALL1:%.*]] = call noundef signext i32 @_ZL7fstatici(i32 noundef signext [[TMP2]])
207 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
208 // CHECK1-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
209 // CHECK1-NEXT:    store i32 [[ADD2]], i32* [[A]], align 4
210 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
211 // CHECK1-NEXT:    [[CALL3:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_i(i32 noundef signext [[TMP4]])
212 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
213 // CHECK1-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
214 // CHECK1-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4
215 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[A]], align 4
216 // CHECK1-NEXT:    ret i32 [[TMP6]]
217 //
218 //
219 // CHECK1-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
220 // CHECK1-SAME: (%struct.S1* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat align 2 {
221 // CHECK1-NEXT:  entry:
222 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
223 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
224 // CHECK1-NEXT:    [[B:%.*]] = alloca i32, align 4
225 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
226 // CHECK1-NEXT:    [[B_CASTED:%.*]] = alloca i64, align 8
227 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
228 // CHECK1-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
229 // CHECK1-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
230 // CHECK1-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
231 // CHECK1-NEXT:    [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [1 x i8*], align 8
232 // CHECK1-NEXT:    [[DOTOFFLOAD_PTRS5:%.*]] = alloca [1 x i8*], align 8
233 // CHECK1-NEXT:    [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [1 x i8*], align 8
234 // CHECK1-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
235 // CHECK1-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
236 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
237 // CHECK1-NEXT:    store i32 1, i32* [[B]], align 4
238 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
239 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[B]], align 4
240 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP0]], [[TMP1]]
241 // CHECK1-NEXT:    store i32 [[SUB]], i32* [[DOTCAPTURE_EXPR_]], align 4
242 // CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[B]], align 4
243 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[B_CASTED]] to i32*
244 // CHECK1-NEXT:    store i32 [[TMP2]], i32* [[CONV]], align 4
245 // CHECK1-NEXT:    [[TMP3:%.*]] = load i64, i64* [[B_CASTED]], align 8
246 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
247 // CHECK1-NEXT:    [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
248 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[CONV2]], align 4
249 // CHECK1-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
250 // CHECK1-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
251 // CHECK1-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
252 // CHECK1-NEXT:    [[TMP7:%.*]] = bitcast i8** [[TMP6]] to %struct.S1**
253 // CHECK1-NEXT:    store %struct.S1* [[THIS1]], %struct.S1** [[TMP7]], align 8
254 // CHECK1-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
255 // CHECK1-NEXT:    [[TMP9:%.*]] = bitcast i8** [[TMP8]] to double**
256 // CHECK1-NEXT:    store double* [[A]], double** [[TMP9]], align 8
257 // CHECK1-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
258 // CHECK1-NEXT:    store i8* null, i8** [[TMP10]], align 8
259 // CHECK1-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
260 // CHECK1-NEXT:    [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64*
261 // CHECK1-NEXT:    store i64 [[TMP3]], i64* [[TMP12]], align 8
262 // CHECK1-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
263 // CHECK1-NEXT:    [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64*
264 // CHECK1-NEXT:    store i64 [[TMP3]], i64* [[TMP14]], align 8
265 // CHECK1-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
266 // CHECK1-NEXT:    store i8* null, i8** [[TMP15]], align 8
267 // CHECK1-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
268 // CHECK1-NEXT:    [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64*
269 // CHECK1-NEXT:    store i64 [[TMP5]], i64* [[TMP17]], align 8
270 // CHECK1-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
271 // CHECK1-NEXT:    [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64*
272 // CHECK1-NEXT:    store i64 [[TMP5]], i64* [[TMP19]], align 8
273 // CHECK1-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
274 // CHECK1-NEXT:    store i8* null, i8** [[TMP20]], align 8
275 // CHECK1-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
276 // CHECK1-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
277 // CHECK1-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
278 // CHECK1-NEXT:    [[TMP24:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l121.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 1, i32 [[TMP23]])
279 // CHECK1-NEXT:    [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
280 // CHECK1-NEXT:    br i1 [[TMP25]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
281 // CHECK1:       omp_offload.failed:
282 // CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l121(%struct.S1* [[THIS1]], i64 [[TMP3]], i64 [[TMP5]]) #[[ATTR3:[0-9]+]]
283 // CHECK1-NEXT:    br label [[OMP_OFFLOAD_CONT]]
284 // CHECK1:       omp_offload.cont:
285 // CHECK1-NEXT:    [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
286 // CHECK1-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
287 // CHECK1-NEXT:    [[TMP27:%.*]] = bitcast i8** [[TMP26]] to %struct.S1**
288 // CHECK1-NEXT:    store %struct.S1* [[THIS1]], %struct.S1** [[TMP27]], align 8
289 // CHECK1-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
290 // CHECK1-NEXT:    [[TMP29:%.*]] = bitcast i8** [[TMP28]] to double**
291 // CHECK1-NEXT:    store double* [[A3]], double** [[TMP29]], align 8
292 // CHECK1-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i64 0, i64 0
293 // CHECK1-NEXT:    store i8* null, i8** [[TMP30]], align 8
294 // CHECK1-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
295 // CHECK1-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
296 // CHECK1-NEXT:    [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l126.region_id, i32 1, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 1, i32 1024)
297 // CHECK1-NEXT:    [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0
298 // CHECK1-NEXT:    br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED7:%.*]], label [[OMP_OFFLOAD_CONT8:%.*]]
299 // CHECK1:       omp_offload.failed7:
300 // CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l126(%struct.S1* [[THIS1]]) #[[ATTR3]]
301 // CHECK1-NEXT:    br label [[OMP_OFFLOAD_CONT8]]
302 // CHECK1:       omp_offload.cont8:
303 // CHECK1-NEXT:    [[A9:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
304 // CHECK1-NEXT:    [[TMP35:%.*]] = load double, double* [[A9]], align 8
305 // CHECK1-NEXT:    [[CONV10:%.*]] = fptosi double [[TMP35]] to i32
306 // CHECK1-NEXT:    ret i32 [[CONV10]]
307 //
308 //
309 // CHECK1-LABEL: define {{[^@]+}}@_ZL7fstatici
310 // CHECK1-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
311 // CHECK1-NEXT:  entry:
312 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
313 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
314 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
315 // CHECK1-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
316 // CHECK1-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
317 // CHECK1-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
318 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
319 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR__CASTED2:%.*]] = alloca i64, align 8
320 // CHECK1-NEXT:    [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [1 x i8*], align 8
321 // CHECK1-NEXT:    [[DOTOFFLOAD_PTRS5:%.*]] = alloca [1 x i8*], align 8
322 // CHECK1-NEXT:    [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [1 x i8*], align 8
323 // CHECK1-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
324 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
325 // CHECK1-NEXT:    store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
326 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
327 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
328 // CHECK1-NEXT:    store i32 [[TMP1]], i32* [[CONV]], align 4
329 // CHECK1-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
330 // CHECK1-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
331 // CHECK1-NEXT:    [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i64*
332 // CHECK1-NEXT:    store i64 [[TMP2]], i64* [[TMP4]], align 8
333 // CHECK1-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
334 // CHECK1-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i64*
335 // CHECK1-NEXT:    store i64 [[TMP2]], i64* [[TMP6]], align 8
336 // CHECK1-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
337 // CHECK1-NEXT:    store i8* null, i8** [[TMP7]], align 8
338 // CHECK1-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
339 // CHECK1-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
340 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
341 // CHECK1-NEXT:    [[TMP11:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l104.region_id, i32 1, i8** [[TMP8]], i8** [[TMP9]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 1, i32 [[TMP10]])
342 // CHECK1-NEXT:    [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
343 // CHECK1-NEXT:    br i1 [[TMP12]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
344 // CHECK1:       omp_offload.failed:
345 // CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l104(i64 [[TMP2]]) #[[ATTR3]]
346 // CHECK1-NEXT:    br label [[OMP_OFFLOAD_CONT]]
347 // CHECK1:       omp_offload.cont:
348 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[N_ADDR]], align 4
349 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 32, [[TMP13]]
350 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[DOTCAPTURE_EXPR_1]], align 4
351 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
352 // CHECK1-NEXT:    [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED2]] to i32*
353 // CHECK1-NEXT:    store i32 [[TMP14]], i32* [[CONV3]], align 4
354 // CHECK1-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED2]], align 8
355 // CHECK1-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
356 // CHECK1-NEXT:    [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64*
357 // CHECK1-NEXT:    store i64 [[TMP15]], i64* [[TMP17]], align 8
358 // CHECK1-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
359 // CHECK1-NEXT:    [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64*
360 // CHECK1-NEXT:    store i64 [[TMP15]], i64* [[TMP19]], align 8
361 // CHECK1-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i64 0, i64 0
362 // CHECK1-NEXT:    store i8* null, i8** [[TMP20]], align 8
363 // CHECK1-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
364 // CHECK1-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
365 // CHECK1-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
366 // CHECK1-NEXT:    [[TMP24:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l108.region_id, i32 1, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 1, i32 [[TMP23]])
367 // CHECK1-NEXT:    [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
368 // CHECK1-NEXT:    br i1 [[TMP25]], label [[OMP_OFFLOAD_FAILED7:%.*]], label [[OMP_OFFLOAD_CONT8:%.*]]
369 // CHECK1:       omp_offload.failed7:
370 // CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l108(i64 [[TMP15]]) #[[ATTR3]]
371 // CHECK1-NEXT:    br label [[OMP_OFFLOAD_CONT8]]
372 // CHECK1:       omp_offload.cont8:
373 // CHECK1-NEXT:    [[TMP26:%.*]] = load i32, i32* [[N_ADDR]], align 4
374 // CHECK1-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP26]], 1
375 // CHECK1-NEXT:    ret i32 [[ADD9]]
376 //
377 //
378 // CHECK1-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
379 // CHECK1-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat {
380 // CHECK1-NEXT:  entry:
381 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
382 // CHECK1-NEXT:    [[A:%.*]] = alloca i32, align 4
383 // CHECK1-NEXT:    [[B:%.*]] = alloca i16, align 2
384 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i16, align 2
385 // CHECK1-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
386 // CHECK1-NEXT:    [[B_CASTED:%.*]] = alloca i64, align 8
387 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
388 // CHECK1-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
389 // CHECK1-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
390 // CHECK1-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
391 // CHECK1-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
392 // CHECK1-NEXT:    store i32 0, i32* [[A]], align 4
393 // CHECK1-NEXT:    [[TMP0:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l88.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 1, i32 20)
394 // CHECK1-NEXT:    [[TMP1:%.*]] = icmp ne i32 [[TMP0]], 0
395 // CHECK1-NEXT:    br i1 [[TMP1]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
396 // CHECK1:       omp_offload.failed:
397 // CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l88() #[[ATTR3]]
398 // CHECK1-NEXT:    br label [[OMP_OFFLOAD_CONT]]
399 // CHECK1:       omp_offload.cont:
400 // CHECK1-NEXT:    store i16 1, i16* [[B]], align 2
401 // CHECK1-NEXT:    [[TMP2:%.*]] = load i16, i16* [[B]], align 2
402 // CHECK1-NEXT:    store i16 [[TMP2]], i16* [[DOTCAPTURE_EXPR_]], align 2
403 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
404 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
405 // CHECK1-NEXT:    store i32 [[TMP3]], i32* [[CONV]], align 4
406 // CHECK1-NEXT:    [[TMP4:%.*]] = load i64, i64* [[A_CASTED]], align 8
407 // CHECK1-NEXT:    [[TMP5:%.*]] = load i16, i16* [[B]], align 2
408 // CHECK1-NEXT:    [[CONV1:%.*]] = bitcast i64* [[B_CASTED]] to i16*
409 // CHECK1-NEXT:    store i16 [[TMP5]], i16* [[CONV1]], align 2
410 // CHECK1-NEXT:    [[TMP6:%.*]] = load i64, i64* [[B_CASTED]], align 8
411 // CHECK1-NEXT:    [[TMP7:%.*]] = load i16, i16* [[DOTCAPTURE_EXPR_]], align 2
412 // CHECK1-NEXT:    [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i16*
413 // CHECK1-NEXT:    store i16 [[TMP7]], i16* [[CONV2]], align 2
414 // CHECK1-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
415 // CHECK1-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
416 // CHECK1-NEXT:    [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i64*
417 // CHECK1-NEXT:    store i64 [[TMP4]], i64* [[TMP10]], align 8
418 // CHECK1-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
419 // CHECK1-NEXT:    [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64*
420 // CHECK1-NEXT:    store i64 [[TMP4]], i64* [[TMP12]], align 8
421 // CHECK1-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
422 // CHECK1-NEXT:    store i8* null, i8** [[TMP13]], align 8
423 // CHECK1-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
424 // CHECK1-NEXT:    [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64*
425 // CHECK1-NEXT:    store i64 [[TMP6]], i64* [[TMP15]], align 8
426 // CHECK1-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
427 // CHECK1-NEXT:    [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64*
428 // CHECK1-NEXT:    store i64 [[TMP6]], i64* [[TMP17]], align 8
429 // CHECK1-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
430 // CHECK1-NEXT:    store i8* null, i8** [[TMP18]], align 8
431 // CHECK1-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
432 // CHECK1-NEXT:    [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64*
433 // CHECK1-NEXT:    store i64 [[TMP8]], i64* [[TMP20]], align 8
434 // CHECK1-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
435 // CHECK1-NEXT:    [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i64*
436 // CHECK1-NEXT:    store i64 [[TMP8]], i64* [[TMP22]], align 8
437 // CHECK1-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
438 // CHECK1-NEXT:    store i8* null, i8** [[TMP23]], align 8
439 // CHECK1-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
440 // CHECK1-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
441 // CHECK1-NEXT:    [[TMP26:%.*]] = load i16, i16* [[DOTCAPTURE_EXPR_]], align 2
442 // CHECK1-NEXT:    [[TMP27:%.*]] = zext i16 [[TMP26]] to i32
443 // CHECK1-NEXT:    [[TMP28:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l93.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 1, i32 [[TMP27]])
444 // CHECK1-NEXT:    [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0
445 // CHECK1-NEXT:    br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]]
446 // CHECK1:       omp_offload.failed3:
447 // CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l93(i64 [[TMP4]], i64 [[TMP6]], i64 [[TMP8]]) #[[ATTR3]]
448 // CHECK1-NEXT:    br label [[OMP_OFFLOAD_CONT4]]
449 // CHECK1:       omp_offload.cont4:
450 // CHECK1-NEXT:    [[TMP30:%.*]] = load i32, i32* [[A]], align 4
451 // CHECK1-NEXT:    ret i32 [[TMP30]]
452 //
453 //
454 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l121
455 // CHECK1-SAME: (%struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1:[0-9]+]] {
456 // CHECK1-NEXT:  entry:
457 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
458 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
459 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
460 // CHECK1-NEXT:    [[B_CASTED:%.*]] = alloca i64, align 8
461 // CHECK1-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
462 // CHECK1-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
463 // CHECK1-NEXT:    store i64 [[B]], i64* [[B_ADDR]], align 8
464 // CHECK1-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
465 // CHECK1-NEXT:    [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
466 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
467 // CHECK1-NEXT:    [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
468 // CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV1]], align 4
469 // CHECK1-NEXT:    call void @__kmpc_push_num_threads(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]])
470 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[CONV]], align 4
471 // CHECK1-NEXT:    [[CONV2:%.*]] = bitcast i64* [[B_CASTED]] to i32*
472 // CHECK1-NEXT:    store i32 [[TMP3]], i32* [[CONV2]], align 4
473 // CHECK1-NEXT:    [[TMP4:%.*]] = load i64, i64* [[B_CASTED]], align 8
474 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]], i64 [[TMP4]])
475 // CHECK1-NEXT:    ret void
476 //
477 //
478 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
479 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]]) #[[ATTR2:[0-9]+]] {
480 // CHECK1-NEXT:  entry:
481 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
482 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
483 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
484 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
485 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
486 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
487 // CHECK1-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
488 // CHECK1-NEXT:    store i64 [[B]], i64* [[B_ADDR]], align 8
489 // CHECK1-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
490 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
491 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
492 // CHECK1-NEXT:    [[CONV1:%.*]] = sitofp i32 [[TMP1]] to double
493 // CHECK1-NEXT:    [[ADD:%.*]] = fadd double [[CONV1]], 1.500000e+00
494 // CHECK1-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
495 // CHECK1-NEXT:    store double [[ADD]], double* [[A]], align 8
496 // CHECK1-NEXT:    ret void
497 //
498 //
499 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l126
500 // CHECK1-SAME: (%struct.S1* noundef [[THIS:%.*]]) #[[ATTR1]] {
501 // CHECK1-NEXT:  entry:
502 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
503 // CHECK1-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
504 // CHECK1-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
505 // CHECK1-NEXT:    [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
506 // CHECK1-NEXT:    call void @__kmpc_push_num_threads(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 1024)
507 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]])
508 // CHECK1-NEXT:    ret void
509 //
510 //
511 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1
512 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]]) #[[ATTR2]] {
513 // CHECK1-NEXT:  entry:
514 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
515 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
516 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
517 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
518 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
519 // CHECK1-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
520 // CHECK1-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
521 // CHECK1-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
522 // CHECK1-NEXT:    store double 2.500000e+00, double* [[A]], align 8
523 // CHECK1-NEXT:    ret void
524 //
525 //
526 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l104
527 // CHECK1-SAME: (i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
528 // CHECK1-NEXT:  entry:
529 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
530 // CHECK1-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
531 // CHECK1-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
532 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
533 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
534 // CHECK1-NEXT:    call void @__kmpc_push_num_threads(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]])
535 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*))
536 // CHECK1-NEXT:    ret void
537 //
538 //
539 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..4
540 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
541 // CHECK1-NEXT:  entry:
542 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
543 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
544 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
545 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
546 // CHECK1-NEXT:    ret void
547 //
548 //
549 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l108
550 // CHECK1-SAME: (i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
551 // CHECK1-NEXT:  entry:
552 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
553 // CHECK1-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
554 // CHECK1-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
555 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
556 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
557 // CHECK1-NEXT:    call void @__kmpc_push_num_threads(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]])
558 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..7 to void (i32*, i32*, ...)*))
559 // CHECK1-NEXT:    ret void
560 //
561 //
562 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..7
563 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
564 // CHECK1-NEXT:  entry:
565 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
566 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
567 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
568 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
569 // CHECK1-NEXT:    ret void
570 //
571 //
572 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l88
573 // CHECK1-SAME: () #[[ATTR1]] {
574 // CHECK1-NEXT:  entry:
575 // CHECK1-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
576 // CHECK1-NEXT:    call void @__kmpc_push_num_threads(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 20)
577 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*))
578 // CHECK1-NEXT:    ret void
579 //
580 //
581 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..10
582 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
583 // CHECK1-NEXT:  entry:
584 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
585 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
586 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
587 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
588 // CHECK1-NEXT:    ret void
589 //
590 //
591 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l93
592 // CHECK1-SAME: (i64 noundef [[A:%.*]], i64 noundef [[B:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
593 // CHECK1-NEXT:  entry:
594 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
595 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
596 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
597 // CHECK1-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
598 // CHECK1-NEXT:    [[B_CASTED:%.*]] = alloca i64, align 8
599 // CHECK1-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
600 // CHECK1-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
601 // CHECK1-NEXT:    store i64 [[B]], i64* [[B_ADDR]], align 8
602 // CHECK1-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
603 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
604 // CHECK1-NEXT:    [[CONV1:%.*]] = bitcast i64* [[B_ADDR]] to i16*
605 // CHECK1-NEXT:    [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i16*
606 // CHECK1-NEXT:    [[TMP1:%.*]] = load i16, i16* [[CONV2]], align 2
607 // CHECK1-NEXT:    [[TMP2:%.*]] = sext i16 [[TMP1]] to i32
608 // CHECK1-NEXT:    call void @__kmpc_push_num_threads(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]])
609 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[CONV]], align 4
610 // CHECK1-NEXT:    [[CONV3:%.*]] = bitcast i64* [[A_CASTED]] to i32*
611 // CHECK1-NEXT:    store i32 [[TMP3]], i32* [[CONV3]], align 4
612 // CHECK1-NEXT:    [[TMP4:%.*]] = load i64, i64* [[A_CASTED]], align 8
613 // CHECK1-NEXT:    [[TMP5:%.*]] = load i16, i16* [[CONV1]], align 2
614 // CHECK1-NEXT:    [[CONV4:%.*]] = bitcast i64* [[B_CASTED]] to i16*
615 // CHECK1-NEXT:    store i16 [[TMP5]], i16* [[CONV4]], align 2
616 // CHECK1-NEXT:    [[TMP6:%.*]] = load i64, i64* [[B_CASTED]], align 8
617 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP4]], i64 [[TMP6]])
618 // CHECK1-NEXT:    ret void
619 //
620 //
621 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..11
622 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[B:%.*]]) #[[ATTR2]] {
623 // CHECK1-NEXT:  entry:
624 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
625 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
626 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
627 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
628 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
629 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
630 // CHECK1-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
631 // CHECK1-NEXT:    store i64 [[B]], i64* [[B_ADDR]], align 8
632 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
633 // CHECK1-NEXT:    [[CONV1:%.*]] = bitcast i64* [[B_ADDR]] to i16*
634 // CHECK1-NEXT:    [[TMP0:%.*]] = load i16, i16* [[CONV1]], align 2
635 // CHECK1-NEXT:    [[CONV2:%.*]] = sext i16 [[TMP0]] to i32
636 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
637 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CONV2]]
638 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[CONV]], align 4
639 // CHECK1-NEXT:    ret void
640 //
641 //
642 // CHECK1-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
643 // CHECK1-SAME: () #[[ATTR4:[0-9]+]] {
644 // CHECK1-NEXT:  entry:
645 // CHECK1-NEXT:    call void @__tgt_register_requires(i64 1)
646 // CHECK1-NEXT:    ret void
647 //
648 //
649 // CHECK3-LABEL: define {{[^@]+}}@_Z3bari
650 // CHECK3-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0:[0-9]+]] {
651 // CHECK3-NEXT:  entry:
652 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
653 // CHECK3-NEXT:    [[A:%.*]] = alloca i32, align 4
654 // CHECK3-NEXT:    [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4
655 // CHECK3-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
656 // CHECK3-NEXT:    store i32 0, i32* [[A]], align 4
657 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
658 // CHECK3-NEXT:    [[CALL:%.*]] = call noundef i32 @_ZN2S12r1Ei(%struct.S1* noundef nonnull align 4 dereferenceable(8) [[S]], i32 noundef [[TMP0]])
659 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 4
660 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
661 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
662 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
663 // CHECK3-NEXT:    [[CALL1:%.*]] = call noundef i32 @_ZL7fstatici(i32 noundef [[TMP2]])
664 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
665 // CHECK3-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
666 // CHECK3-NEXT:    store i32 [[ADD2]], i32* [[A]], align 4
667 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
668 // CHECK3-NEXT:    [[CALL3:%.*]] = call noundef i32 @_Z9ftemplateIiET_i(i32 noundef [[TMP4]])
669 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
670 // CHECK3-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
671 // CHECK3-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4
672 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[A]], align 4
673 // CHECK3-NEXT:    ret i32 [[TMP6]]
674 //
675 //
676 // CHECK3-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
677 // CHECK3-SAME: (%struct.S1* noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[N:%.*]]) #[[ATTR0]] comdat align 2 {
678 // CHECK3-NEXT:  entry:
679 // CHECK3-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
680 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
681 // CHECK3-NEXT:    [[B:%.*]] = alloca i32, align 4
682 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
683 // CHECK3-NEXT:    [[B_CASTED:%.*]] = alloca i32, align 4
684 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
685 // CHECK3-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4
686 // CHECK3-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4
687 // CHECK3-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4
688 // CHECK3-NEXT:    [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [1 x i8*], align 4
689 // CHECK3-NEXT:    [[DOTOFFLOAD_PTRS4:%.*]] = alloca [1 x i8*], align 4
690 // CHECK3-NEXT:    [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [1 x i8*], align 4
691 // CHECK3-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
692 // CHECK3-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
693 // CHECK3-NEXT:    [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
694 // CHECK3-NEXT:    store i32 1, i32* [[B]], align 4
695 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
696 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[B]], align 4
697 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP0]], [[TMP1]]
698 // CHECK3-NEXT:    store i32 [[SUB]], i32* [[DOTCAPTURE_EXPR_]], align 4
699 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[B]], align 4
700 // CHECK3-NEXT:    store i32 [[TMP2]], i32* [[B_CASTED]], align 4
701 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[B_CASTED]], align 4
702 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
703 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
704 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
705 // CHECK3-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
706 // CHECK3-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
707 // CHECK3-NEXT:    [[TMP7:%.*]] = bitcast i8** [[TMP6]] to %struct.S1**
708 // CHECK3-NEXT:    store %struct.S1* [[THIS1]], %struct.S1** [[TMP7]], align 4
709 // CHECK3-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
710 // CHECK3-NEXT:    [[TMP9:%.*]] = bitcast i8** [[TMP8]] to double**
711 // CHECK3-NEXT:    store double* [[A]], double** [[TMP9]], align 4
712 // CHECK3-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
713 // CHECK3-NEXT:    store i8* null, i8** [[TMP10]], align 4
714 // CHECK3-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
715 // CHECK3-NEXT:    [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32*
716 // CHECK3-NEXT:    store i32 [[TMP3]], i32* [[TMP12]], align 4
717 // CHECK3-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
718 // CHECK3-NEXT:    [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32*
719 // CHECK3-NEXT:    store i32 [[TMP3]], i32* [[TMP14]], align 4
720 // CHECK3-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
721 // CHECK3-NEXT:    store i8* null, i8** [[TMP15]], align 4
722 // CHECK3-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
723 // CHECK3-NEXT:    [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32*
724 // CHECK3-NEXT:    store i32 [[TMP5]], i32* [[TMP17]], align 4
725 // CHECK3-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
726 // CHECK3-NEXT:    [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32*
727 // CHECK3-NEXT:    store i32 [[TMP5]], i32* [[TMP19]], align 4
728 // CHECK3-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
729 // CHECK3-NEXT:    store i8* null, i8** [[TMP20]], align 4
730 // CHECK3-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
731 // CHECK3-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
732 // CHECK3-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
733 // CHECK3-NEXT:    [[TMP24:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l121.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 1, i32 [[TMP23]])
734 // CHECK3-NEXT:    [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
735 // CHECK3-NEXT:    br i1 [[TMP25]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
736 // CHECK3:       omp_offload.failed:
737 // CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l121(%struct.S1* [[THIS1]], i32 [[TMP3]], i32 [[TMP5]]) #[[ATTR3:[0-9]+]]
738 // CHECK3-NEXT:    br label [[OMP_OFFLOAD_CONT]]
739 // CHECK3:       omp_offload.cont:
740 // CHECK3-NEXT:    [[A2:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
741 // CHECK3-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
742 // CHECK3-NEXT:    [[TMP27:%.*]] = bitcast i8** [[TMP26]] to %struct.S1**
743 // CHECK3-NEXT:    store %struct.S1* [[THIS1]], %struct.S1** [[TMP27]], align 4
744 // CHECK3-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
745 // CHECK3-NEXT:    [[TMP29:%.*]] = bitcast i8** [[TMP28]] to double**
746 // CHECK3-NEXT:    store double* [[A2]], double** [[TMP29]], align 4
747 // CHECK3-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS5]], i32 0, i32 0
748 // CHECK3-NEXT:    store i8* null, i8** [[TMP30]], align 4
749 // CHECK3-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
750 // CHECK3-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
751 // CHECK3-NEXT:    [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l126.region_id, i32 1, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 1, i32 1024)
752 // CHECK3-NEXT:    [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0
753 // CHECK3-NEXT:    br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED6:%.*]], label [[OMP_OFFLOAD_CONT7:%.*]]
754 // CHECK3:       omp_offload.failed6:
755 // CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l126(%struct.S1* [[THIS1]]) #[[ATTR3]]
756 // CHECK3-NEXT:    br label [[OMP_OFFLOAD_CONT7]]
757 // CHECK3:       omp_offload.cont7:
758 // CHECK3-NEXT:    [[A8:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
759 // CHECK3-NEXT:    [[TMP35:%.*]] = load double, double* [[A8]], align 4
760 // CHECK3-NEXT:    [[CONV:%.*]] = fptosi double [[TMP35]] to i32
761 // CHECK3-NEXT:    ret i32 [[CONV]]
762 //
763 //
764 // CHECK3-LABEL: define {{[^@]+}}@_ZL7fstatici
765 // CHECK3-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
766 // CHECK3-NEXT:  entry:
767 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
768 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
769 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
770 // CHECK3-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4
771 // CHECK3-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4
772 // CHECK3-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4
773 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
774 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR__CASTED2:%.*]] = alloca i32, align 4
775 // CHECK3-NEXT:    [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [1 x i8*], align 4
776 // CHECK3-NEXT:    [[DOTOFFLOAD_PTRS4:%.*]] = alloca [1 x i8*], align 4
777 // CHECK3-NEXT:    [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [1 x i8*], align 4
778 // CHECK3-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
779 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
780 // CHECK3-NEXT:    store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
781 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
782 // CHECK3-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
783 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
784 // CHECK3-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
785 // CHECK3-NEXT:    [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i32*
786 // CHECK3-NEXT:    store i32 [[TMP2]], i32* [[TMP4]], align 4
787 // CHECK3-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
788 // CHECK3-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32*
789 // CHECK3-NEXT:    store i32 [[TMP2]], i32* [[TMP6]], align 4
790 // CHECK3-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
791 // CHECK3-NEXT:    store i8* null, i8** [[TMP7]], align 4
792 // CHECK3-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
793 // CHECK3-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
794 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
795 // CHECK3-NEXT:    [[TMP11:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l104.region_id, i32 1, i8** [[TMP8]], i8** [[TMP9]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 1, i32 [[TMP10]])
796 // CHECK3-NEXT:    [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
797 // CHECK3-NEXT:    br i1 [[TMP12]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
798 // CHECK3:       omp_offload.failed:
799 // CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l104(i32 [[TMP2]]) #[[ATTR3]]
800 // CHECK3-NEXT:    br label [[OMP_OFFLOAD_CONT]]
801 // CHECK3:       omp_offload.cont:
802 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[N_ADDR]], align 4
803 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 32, [[TMP13]]
804 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[DOTCAPTURE_EXPR_1]], align 4
805 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
806 // CHECK3-NEXT:    store i32 [[TMP14]], i32* [[DOTCAPTURE_EXPR__CASTED2]], align 4
807 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED2]], align 4
808 // CHECK3-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
809 // CHECK3-NEXT:    [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32*
810 // CHECK3-NEXT:    store i32 [[TMP15]], i32* [[TMP17]], align 4
811 // CHECK3-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
812 // CHECK3-NEXT:    [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32*
813 // CHECK3-NEXT:    store i32 [[TMP15]], i32* [[TMP19]], align 4
814 // CHECK3-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS5]], i32 0, i32 0
815 // CHECK3-NEXT:    store i8* null, i8** [[TMP20]], align 4
816 // CHECK3-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
817 // CHECK3-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
818 // CHECK3-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
819 // CHECK3-NEXT:    [[TMP24:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l108.region_id, i32 1, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 1, i32 [[TMP23]])
820 // CHECK3-NEXT:    [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
821 // CHECK3-NEXT:    br i1 [[TMP25]], label [[OMP_OFFLOAD_FAILED6:%.*]], label [[OMP_OFFLOAD_CONT7:%.*]]
822 // CHECK3:       omp_offload.failed6:
823 // CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l108(i32 [[TMP15]]) #[[ATTR3]]
824 // CHECK3-NEXT:    br label [[OMP_OFFLOAD_CONT7]]
825 // CHECK3:       omp_offload.cont7:
826 // CHECK3-NEXT:    [[TMP26:%.*]] = load i32, i32* [[N_ADDR]], align 4
827 // CHECK3-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP26]], 1
828 // CHECK3-NEXT:    ret i32 [[ADD8]]
829 //
830 //
831 // CHECK3-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
832 // CHECK3-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] comdat {
833 // CHECK3-NEXT:  entry:
834 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
835 // CHECK3-NEXT:    [[A:%.*]] = alloca i32, align 4
836 // CHECK3-NEXT:    [[B:%.*]] = alloca i16, align 2
837 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i16, align 2
838 // CHECK3-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
839 // CHECK3-NEXT:    [[B_CASTED:%.*]] = alloca i32, align 4
840 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
841 // CHECK3-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4
842 // CHECK3-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4
843 // CHECK3-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4
844 // CHECK3-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
845 // CHECK3-NEXT:    store i32 0, i32* [[A]], align 4
846 // CHECK3-NEXT:    [[TMP0:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l88.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 1, i32 20)
847 // CHECK3-NEXT:    [[TMP1:%.*]] = icmp ne i32 [[TMP0]], 0
848 // CHECK3-NEXT:    br i1 [[TMP1]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
849 // CHECK3:       omp_offload.failed:
850 // CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l88() #[[ATTR3]]
851 // CHECK3-NEXT:    br label [[OMP_OFFLOAD_CONT]]
852 // CHECK3:       omp_offload.cont:
853 // CHECK3-NEXT:    store i16 1, i16* [[B]], align 2
854 // CHECK3-NEXT:    [[TMP2:%.*]] = load i16, i16* [[B]], align 2
855 // CHECK3-NEXT:    store i16 [[TMP2]], i16* [[DOTCAPTURE_EXPR_]], align 2
856 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
857 // CHECK3-NEXT:    store i32 [[TMP3]], i32* [[A_CASTED]], align 4
858 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A_CASTED]], align 4
859 // CHECK3-NEXT:    [[TMP5:%.*]] = load i16, i16* [[B]], align 2
860 // CHECK3-NEXT:    [[CONV:%.*]] = bitcast i32* [[B_CASTED]] to i16*
861 // CHECK3-NEXT:    store i16 [[TMP5]], i16* [[CONV]], align 2
862 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[B_CASTED]], align 4
863 // CHECK3-NEXT:    [[TMP7:%.*]] = load i16, i16* [[DOTCAPTURE_EXPR_]], align 2
864 // CHECK3-NEXT:    [[CONV1:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__CASTED]] to i16*
865 // CHECK3-NEXT:    store i16 [[TMP7]], i16* [[CONV1]], align 2
866 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
867 // CHECK3-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
868 // CHECK3-NEXT:    [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i32*
869 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[TMP10]], align 4
870 // CHECK3-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
871 // CHECK3-NEXT:    [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32*
872 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[TMP12]], align 4
873 // CHECK3-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
874 // CHECK3-NEXT:    store i8* null, i8** [[TMP13]], align 4
875 // CHECK3-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
876 // CHECK3-NEXT:    [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32*
877 // CHECK3-NEXT:    store i32 [[TMP6]], i32* [[TMP15]], align 4
878 // CHECK3-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
879 // CHECK3-NEXT:    [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32*
880 // CHECK3-NEXT:    store i32 [[TMP6]], i32* [[TMP17]], align 4
881 // CHECK3-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
882 // CHECK3-NEXT:    store i8* null, i8** [[TMP18]], align 4
883 // CHECK3-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
884 // CHECK3-NEXT:    [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32*
885 // CHECK3-NEXT:    store i32 [[TMP8]], i32* [[TMP20]], align 4
886 // CHECK3-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
887 // CHECK3-NEXT:    [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i32*
888 // CHECK3-NEXT:    store i32 [[TMP8]], i32* [[TMP22]], align 4
889 // CHECK3-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
890 // CHECK3-NEXT:    store i8* null, i8** [[TMP23]], align 4
891 // CHECK3-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
892 // CHECK3-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
893 // CHECK3-NEXT:    [[TMP26:%.*]] = load i16, i16* [[DOTCAPTURE_EXPR_]], align 2
894 // CHECK3-NEXT:    [[TMP27:%.*]] = zext i16 [[TMP26]] to i32
895 // CHECK3-NEXT:    [[TMP28:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l93.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 1, i32 [[TMP27]])
896 // CHECK3-NEXT:    [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0
897 // CHECK3-NEXT:    br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED2:%.*]], label [[OMP_OFFLOAD_CONT3:%.*]]
898 // CHECK3:       omp_offload.failed2:
899 // CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l93(i32 [[TMP4]], i32 [[TMP6]], i32 [[TMP8]]) #[[ATTR3]]
900 // CHECK3-NEXT:    br label [[OMP_OFFLOAD_CONT3]]
901 // CHECK3:       omp_offload.cont3:
902 // CHECK3-NEXT:    [[TMP30:%.*]] = load i32, i32* [[A]], align 4
903 // CHECK3-NEXT:    ret i32 [[TMP30]]
904 //
905 //
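// The CHECK3 assertions below cover the i386 host-side offloading entry points and their outlined parallel bodies:
// each target entry pushes its num_threads value (captured expression or constant) via __kmpc_push_num_threads, then forks the outlined region with __kmpc_fork_call.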
906 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l121
907 // CHECK3-SAME: (%struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1:[0-9]+]] {
908 // CHECK3-NEXT:  entry:
909 // CHECK3-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
910 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
911 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
912 // CHECK3-NEXT:    [[B_CASTED:%.*]] = alloca i32, align 4
913 // CHECK3-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
914 // CHECK3-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
915 // CHECK3-NEXT:    store i32 [[B]], i32* [[B_ADDR]], align 4
916 // CHECK3-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
917 // CHECK3-NEXT:    [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
918 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
919 // CHECK3-NEXT:    call void @__kmpc_push_num_threads(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]])
920 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[B_ADDR]], align 4
921 // CHECK3-NEXT:    store i32 [[TMP3]], i32* [[B_CASTED]], align 4
922 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[B_CASTED]], align 4
923 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]], i32 [[TMP4]])
924 // CHECK3-NEXT:    ret void
925 //
926 //
927 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined.
928 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]]) #[[ATTR2:[0-9]+]] {
929 // CHECK3-NEXT:  entry:
930 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
931 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
932 // CHECK3-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
933 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
934 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
935 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
936 // CHECK3-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
937 // CHECK3-NEXT:    store i32 [[B]], i32* [[B_ADDR]], align 4
938 // CHECK3-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
939 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[B_ADDR]], align 4
940 // CHECK3-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP1]] to double
941 // CHECK3-NEXT:    [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00
942 // CHECK3-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
943 // CHECK3-NEXT:    store double [[ADD]], double* [[A]], align 4
944 // CHECK3-NEXT:    ret void
945 //
946 //
947 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l126
948 // CHECK3-SAME: (%struct.S1* noundef [[THIS:%.*]]) #[[ATTR1]] {
949 // CHECK3-NEXT:  entry:
950 // CHECK3-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
951 // CHECK3-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
952 // CHECK3-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
953 // CHECK3-NEXT:    [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
954 // CHECK3-NEXT:    call void @__kmpc_push_num_threads(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 1024)
955 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]])
956 // CHECK3-NEXT:    ret void
957 //
958 //
959 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..1
960 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]]) #[[ATTR2]] {
961 // CHECK3-NEXT:  entry:
962 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
963 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
964 // CHECK3-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
965 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
966 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
967 // CHECK3-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
968 // CHECK3-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
969 // CHECK3-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
970 // CHECK3-NEXT:    store double 2.500000e+00, double* [[A]], align 4
971 // CHECK3-NEXT:    ret void
972 //
973 //
974 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l104
975 // CHECK3-SAME: (i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
976 // CHECK3-NEXT:  entry:
977 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
978 // CHECK3-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
979 // CHECK3-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
980 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
981 // CHECK3-NEXT:    call void @__kmpc_push_num_threads(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]])
982 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*))
983 // CHECK3-NEXT:    ret void
984 //
985 //
986 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..4
987 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
988 // CHECK3-NEXT:  entry:
989 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
990 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
991 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
992 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
993 // CHECK3-NEXT:    ret void
994 //
995 //
996 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l108
997 // CHECK3-SAME: (i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
998 // CHECK3-NEXT:  entry:
999 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
1000 // CHECK3-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
1001 // CHECK3-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
1002 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
1003 // CHECK3-NEXT:    call void @__kmpc_push_num_threads(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]])
1004 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..7 to void (i32*, i32*, ...)*))
1005 // CHECK3-NEXT:    ret void
1006 //
1007 //
1008 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..7
1009 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
1010 // CHECK3-NEXT:  entry:
1011 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
1012 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
1013 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
1014 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
1015 // CHECK3-NEXT:    ret void
1016 //
1017 //
1018 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l88
1019 // CHECK3-SAME: () #[[ATTR1]] {
1020 // CHECK3-NEXT:  entry:
1021 // CHECK3-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
1022 // CHECK3-NEXT:    call void @__kmpc_push_num_threads(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 20)
1023 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*))
1024 // CHECK3-NEXT:    ret void
1025 //
1026 //
1027 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..10
1028 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
1029 // CHECK3-NEXT:  entry:
1030 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
1031 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
1032 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
1033 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
1034 // CHECK3-NEXT:    ret void
1035 //
1036 //
1037 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l93
1038 // CHECK3-SAME: (i32 noundef [[A:%.*]], i32 noundef [[B:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
1039 // CHECK3-NEXT:  entry:
1040 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
1041 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
1042 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
1043 // CHECK3-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
1044 // CHECK3-NEXT:    [[B_CASTED:%.*]] = alloca i32, align 4
1045 // CHECK3-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
1046 // CHECK3-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
1047 // CHECK3-NEXT:    store i32 [[B]], i32* [[B_ADDR]], align 4
1048 // CHECK3-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
1049 // CHECK3-NEXT:    [[CONV:%.*]] = bitcast i32* [[B_ADDR]] to i16*
1050 // CHECK3-NEXT:    [[CONV1:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__ADDR]] to i16*
1051 // CHECK3-NEXT:    [[TMP1:%.*]] = load i16, i16* [[CONV1]], align 2
1052 // CHECK3-NEXT:    [[TMP2:%.*]] = sext i16 [[TMP1]] to i32
1053 // CHECK3-NEXT:    call void @__kmpc_push_num_threads(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]])
1054 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A_ADDR]], align 4
1055 // CHECK3-NEXT:    store i32 [[TMP3]], i32* [[A_CASTED]], align 4
1056 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A_CASTED]], align 4
1057 // CHECK3-NEXT:    [[TMP5:%.*]] = load i16, i16* [[CONV]], align 2
1058 // CHECK3-NEXT:    [[CONV2:%.*]] = bitcast i32* [[B_CASTED]] to i16*
1059 // CHECK3-NEXT:    store i16 [[TMP5]], i16* [[CONV2]], align 2
1060 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[B_CASTED]], align 4
1061 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP4]], i32 [[TMP6]])
1062 // CHECK3-NEXT:    ret void
1063 //
1064 //
1065 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..11
1066 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[B:%.*]]) #[[ATTR2]] {
1067 // CHECK3-NEXT:  entry:
1068 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
1069 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
1070 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
1071 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
1072 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
1073 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
1074 // CHECK3-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
1075 // CHECK3-NEXT:    store i32 [[B]], i32* [[B_ADDR]], align 4
1076 // CHECK3-NEXT:    [[CONV:%.*]] = bitcast i32* [[B_ADDR]] to i16*
1077 // CHECK3-NEXT:    [[TMP0:%.*]] = load i16, i16* [[CONV]], align 2
1078 // CHECK3-NEXT:    [[CONV1:%.*]] = sext i16 [[TMP0]] to i32
1079 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
1080 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CONV1]]
1081 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[A_ADDR]], align 4
1082 // CHECK3-NEXT:    ret void
1083 //
1084 //
1085 // CHECK3-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
1086 // CHECK3-SAME: () #[[ATTR4:[0-9]+]] {
1087 // CHECK3-NEXT:  entry:
1088 // CHECK3-NEXT:    call void @__tgt_register_requires(i64 1)
1089 // CHECK3-NEXT:    ret void
1090 //
1091 //
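// The CHECK9 assertions below cover the powerpc64le device-side versions of the same target regions;
// scalar captures arrive as i64 and are bitcast back to their original width before use.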
1092 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l104
1093 // CHECK9-SAME: (i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0:[0-9]+]] {
1094 // CHECK9-NEXT:  entry:
1095 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
1096 // CHECK9-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
1097 // CHECK9-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
1098 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
1099 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
1100 // CHECK9-NEXT:    call void @__kmpc_push_num_threads(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]])
1101 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
1102 // CHECK9-NEXT:    ret void
1103 //
1104 //
1105 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined.
1106 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] {
1107 // CHECK9-NEXT:  entry:
1108 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1109 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1110 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1111 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1112 // CHECK9-NEXT:    ret void
1113 //
1114 //
1115 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l108
1116 // CHECK9-SAME: (i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] {
1117 // CHECK9-NEXT:  entry:
1118 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
1119 // CHECK9-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
1120 // CHECK9-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
1121 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
1122 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
1123 // CHECK9-NEXT:    call void @__kmpc_push_num_threads(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]])
1124 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*))
1125 // CHECK9-NEXT:    ret void
1126 //
1127 //
1128 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..1
1129 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] {
1130 // CHECK9-NEXT:  entry:
1131 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1132 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1133 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1134 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1135 // CHECK9-NEXT:    ret void
1136 //
1137 //
1138 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l121
1139 // CHECK9-SAME: (%struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] {
1140 // CHECK9-NEXT:  entry:
1141 // CHECK9-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
1142 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
1143 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
1144 // CHECK9-NEXT:    [[B_CASTED:%.*]] = alloca i64, align 8
1145 // CHECK9-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
1146 // CHECK9-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
1147 // CHECK9-NEXT:    store i64 [[B]], i64* [[B_ADDR]], align 8
1148 // CHECK9-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
1149 // CHECK9-NEXT:    [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
1150 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
1151 // CHECK9-NEXT:    [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
1152 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV1]], align 4
1153 // CHECK9-NEXT:    call void @__kmpc_push_num_threads(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]])
1154 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32, i32* [[CONV]], align 4
1155 // CHECK9-NEXT:    [[CONV2:%.*]] = bitcast i64* [[B_CASTED]] to i32*
1156 // CHECK9-NEXT:    store i32 [[TMP3]], i32* [[CONV2]], align 4
1157 // CHECK9-NEXT:    [[TMP4:%.*]] = load i64, i64* [[B_CASTED]], align 8
1158 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64)* @.omp_outlined..2 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]], i64 [[TMP4]])
1159 // CHECK9-NEXT:    ret void
1160 //
1161 //
1162 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..2
1163 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]]) #[[ATTR1]] {
1164 // CHECK9-NEXT:  entry:
1165 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1166 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1167 // CHECK9-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
1168 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
1169 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1170 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1171 // CHECK9-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
1172 // CHECK9-NEXT:    store i64 [[B]], i64* [[B_ADDR]], align 8
1173 // CHECK9-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
1174 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
1175 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
1176 // CHECK9-NEXT:    [[CONV1:%.*]] = sitofp i32 [[TMP1]] to double
1177 // CHECK9-NEXT:    [[ADD:%.*]] = fadd double [[CONV1]], 1.500000e+00
1178 // CHECK9-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
1179 // CHECK9-NEXT:    store double [[ADD]], double* [[A]], align 8
1180 // CHECK9-NEXT:    ret void
1181 //
1182 //
1183 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l126
1184 // CHECK9-SAME: (%struct.S1* noundef [[THIS:%.*]]) #[[ATTR0]] {
1185 // CHECK9-NEXT:  entry:
1186 // CHECK9-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
1187 // CHECK9-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
1188 // CHECK9-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
1189 // CHECK9-NEXT:    [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
1190 // CHECK9-NEXT:    call void @__kmpc_push_num_threads(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 1024)
1191 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]])
1192 // CHECK9-NEXT:    ret void
1193 //
1194 //
1195 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..3
1196 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]]) #[[ATTR1]] {
1197 // CHECK9-NEXT:  entry:
1198 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1199 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1200 // CHECK9-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
1201 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1202 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1203 // CHECK9-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
1204 // CHECK9-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
1205 // CHECK9-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
1206 // CHECK9-NEXT:    store double 2.500000e+00, double* [[A]], align 8
1207 // CHECK9-NEXT:    ret void
1208 //
1209 //
1210 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l88
1211 // CHECK9-SAME: () #[[ATTR0]] {
1212 // CHECK9-NEXT:  entry:
1213 // CHECK9-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
1214 // CHECK9-NEXT:    call void @__kmpc_push_num_threads(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 20)
1215 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*))
1216 // CHECK9-NEXT:    ret void
1217 //
1218 //
1219 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..4
1220 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] {
1221 // CHECK9-NEXT:  entry:
1222 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1223 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1224 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1225 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1226 // CHECK9-NEXT:    ret void
1227 //
1228 //
1229 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l93
1230 // CHECK9-SAME: (i64 noundef [[A:%.*]], i64 noundef [[B:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] {
1231 // CHECK9-NEXT:  entry:
1232 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
1233 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
1234 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
1235 // CHECK9-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
1236 // CHECK9-NEXT:    [[B_CASTED:%.*]] = alloca i64, align 8
1237 // CHECK9-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
1238 // CHECK9-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
1239 // CHECK9-NEXT:    store i64 [[B]], i64* [[B_ADDR]], align 8
1240 // CHECK9-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
1241 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
1242 // CHECK9-NEXT:    [[CONV1:%.*]] = bitcast i64* [[B_ADDR]] to i16*
1243 // CHECK9-NEXT:    [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i16*
1244 // CHECK9-NEXT:    [[TMP1:%.*]] = load i16, i16* [[CONV2]], align 2
1245 // CHECK9-NEXT:    [[TMP2:%.*]] = sext i16 [[TMP1]] to i32
1246 // CHECK9-NEXT:    call void @__kmpc_push_num_threads(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]])
1247 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32, i32* [[CONV]], align 4
1248 // CHECK9-NEXT:    [[CONV3:%.*]] = bitcast i64* [[A_CASTED]] to i32*
1249 // CHECK9-NEXT:    store i32 [[TMP3]], i32* [[CONV3]], align 4
1250 // CHECK9-NEXT:    [[TMP4:%.*]] = load i64, i64* [[A_CASTED]], align 8
1251 // CHECK9-NEXT:    [[TMP5:%.*]] = load i16, i16* [[CONV1]], align 2
1252 // CHECK9-NEXT:    [[CONV4:%.*]] = bitcast i64* [[B_CASTED]] to i16*
1253 // CHECK9-NEXT:    store i16 [[TMP5]], i16* [[CONV4]], align 2
1254 // CHECK9-NEXT:    [[TMP6:%.*]] = load i64, i64* [[B_CASTED]], align 8
1255 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i64 [[TMP4]], i64 [[TMP6]])
1256 // CHECK9-NEXT:    ret void
1257 //
1258 //
1259 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..5
1260 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[B:%.*]]) #[[ATTR1]] {
1261 // CHECK9-NEXT:  entry:
1262 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1263 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1264 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
1265 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
1266 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1267 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1268 // CHECK9-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
1269 // CHECK9-NEXT:    store i64 [[B]], i64* [[B_ADDR]], align 8
1270 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
1271 // CHECK9-NEXT:    [[CONV1:%.*]] = bitcast i64* [[B_ADDR]] to i16*
1272 // CHECK9-NEXT:    [[TMP0:%.*]] = load i16, i16* [[CONV1]], align 2
1273 // CHECK9-NEXT:    [[CONV2:%.*]] = sext i16 [[TMP0]] to i32
1274 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
1275 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CONV2]]
1276 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[CONV]], align 4
1277 // CHECK9-NEXT:    ret void
1278 //
1279 //
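// The CHECK11 assertions below cover the i386 device-side versions;
// i32 captures are used directly, while i16 captures are read through an i16* bitcast of their i32 slot.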
1280 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l104
1281 // CHECK11-SAME: (i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0:[0-9]+]] {
1282 // CHECK11-NEXT:  entry:
1283 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
1284 // CHECK11-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
1285 // CHECK11-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
1286 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
1287 // CHECK11-NEXT:    call void @__kmpc_push_num_threads(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]])
1288 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
1289 // CHECK11-NEXT:    ret void
1290 //
1291 //
1292 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined.
1293 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] {
1294 // CHECK11-NEXT:  entry:
1295 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
1296 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
1297 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
1298 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
1299 // CHECK11-NEXT:    ret void
1300 //
1301 //
1302 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l108
1303 // CHECK11-SAME: (i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] {
1304 // CHECK11-NEXT:  entry:
1305 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
1306 // CHECK11-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
1307 // CHECK11-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
1308 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
1309 // CHECK11-NEXT:    call void @__kmpc_push_num_threads(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]])
1310 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*))
1311 // CHECK11-NEXT:    ret void
1312 //
1313 //
1314 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..1
1315 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] {
1316 // CHECK11-NEXT:  entry:
1317 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
1318 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
1319 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
1320 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
1321 // CHECK11-NEXT:    ret void
1322 //
1323 //
1324 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l121
1325 // CHECK11-SAME: (%struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] {
1326 // CHECK11-NEXT:  entry:
1327 // CHECK11-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
1328 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
1329 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
1330 // CHECK11-NEXT:    [[B_CASTED:%.*]] = alloca i32, align 4
1331 // CHECK11-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
1332 // CHECK11-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
1333 // CHECK11-NEXT:    store i32 [[B]], i32* [[B_ADDR]], align 4
1334 // CHECK11-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
1335 // CHECK11-NEXT:    [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
1336 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
1337 // CHECK11-NEXT:    call void @__kmpc_push_num_threads(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]])
1338 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32, i32* [[B_ADDR]], align 4
1339 // CHECK11-NEXT:    store i32 [[TMP3]], i32* [[B_CASTED]], align 4
1340 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[B_CASTED]], align 4
1341 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32)* @.omp_outlined..2 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]], i32 [[TMP4]])
1342 // CHECK11-NEXT:    ret void
1343 //
1344 //
1345 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..2
1346 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]]) #[[ATTR1]] {
1347 // CHECK11-NEXT:  entry:
1348 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
1349 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
1350 // CHECK11-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
1351 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
1352 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
1353 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
1354 // CHECK11-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
1355 // CHECK11-NEXT:    store i32 [[B]], i32* [[B_ADDR]], align 4
1356 // CHECK11-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
1357 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32, i32* [[B_ADDR]], align 4
1358 // CHECK11-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP1]] to double
1359 // CHECK11-NEXT:    [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00
1360 // CHECK11-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
1361 // CHECK11-NEXT:    store double [[ADD]], double* [[A]], align 4
1362 // CHECK11-NEXT:    ret void
1363 //
1364 //
1365 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l126
1366 // CHECK11-SAME: (%struct.S1* noundef [[THIS:%.*]]) #[[ATTR0]] {
1367 // CHECK11-NEXT:  entry:
1368 // CHECK11-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
1369 // CHECK11-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
1370 // CHECK11-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
1371 // CHECK11-NEXT:    [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
1372 // CHECK11-NEXT:    call void @__kmpc_push_num_threads(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 1024)
1373 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]])
1374 // CHECK11-NEXT:    ret void
1375 //
1376 //
1377 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..3
1378 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]]) #[[ATTR1]] {
1379 // CHECK11-NEXT:  entry:
1380 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
1381 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
1382 // CHECK11-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
1383 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
1384 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
1385 // CHECK11-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
1386 // CHECK11-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
1387 // CHECK11-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
1388 // CHECK11-NEXT:    store double 2.500000e+00, double* [[A]], align 4
1389 // CHECK11-NEXT:    ret void
1390 //
1391 //
1392 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l88
1393 // CHECK11-SAME: () #[[ATTR0]] {
1394 // CHECK11-NEXT:  entry:
1395 // CHECK11-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
1396 // CHECK11-NEXT:    call void @__kmpc_push_num_threads(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 20)
1397 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*))
1398 // CHECK11-NEXT:    ret void
1399 //
1400 //
1401 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..4
1402 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] {
1403 // CHECK11-NEXT:  entry:
1404 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
1405 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
1406 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
1407 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
1408 // CHECK11-NEXT:    ret void
1409 //
1410 //
1411 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l93
1412 // CHECK11-SAME: (i32 noundef [[A:%.*]], i32 noundef [[B:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] {
1413 // CHECK11-NEXT:  entry:
1414 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
1415 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
1416 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
1417 // CHECK11-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
1418 // CHECK11-NEXT:    [[B_CASTED:%.*]] = alloca i32, align 4
1419 // CHECK11-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
1420 // CHECK11-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
1421 // CHECK11-NEXT:    store i32 [[B]], i32* [[B_ADDR]], align 4
1422 // CHECK11-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
1423 // CHECK11-NEXT:    [[CONV:%.*]] = bitcast i32* [[B_ADDR]] to i16*
1424 // CHECK11-NEXT:    [[CONV1:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__ADDR]] to i16*
1425 // CHECK11-NEXT:    [[TMP1:%.*]] = load i16, i16* [[CONV1]], align 2
1426 // CHECK11-NEXT:    [[TMP2:%.*]] = sext i16 [[TMP1]] to i32
1427 // CHECK11-NEXT:    call void @__kmpc_push_num_threads(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]])
1428 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A_ADDR]], align 4
1429 // CHECK11-NEXT:    store i32 [[TMP3]], i32* [[A_CASTED]], align 4
1430 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A_CASTED]], align 4
1431 // CHECK11-NEXT:    [[TMP5:%.*]] = load i16, i16* [[CONV]], align 2
1432 // CHECK11-NEXT:    [[CONV2:%.*]] = bitcast i32* [[B_CASTED]] to i16*
1433 // CHECK11-NEXT:    store i16 [[TMP5]], i16* [[CONV2]], align 2
1434 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[B_CASTED]], align 4
1435 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32 [[TMP4]], i32 [[TMP6]])
1436 // CHECK11-NEXT:    ret void
1437 //
1438 //
1439 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..5
1440 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[B:%.*]]) #[[ATTR1]] {
1441 // CHECK11-NEXT:  entry:
1442 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
1443 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
1444 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
1445 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
1446 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
1447 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
1448 // CHECK11-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
1449 // CHECK11-NEXT:    store i32 [[B]], i32* [[B_ADDR]], align 4
1450 // CHECK11-NEXT:    [[CONV:%.*]] = bitcast i32* [[B_ADDR]] to i16*
1451 // CHECK11-NEXT:    [[TMP0:%.*]] = load i16, i16* [[CONV]], align 2
1452 // CHECK11-NEXT:    [[CONV1:%.*]] = sext i16 [[TMP0]] to i32
1453 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
1454 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CONV1]]
1455 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[A_ADDR]], align 4
1456 // CHECK11-NEXT:    ret void
1457 //