// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// REQUIRES: powerpc-registered-target
// REQUIRES: nvptx-registered-target

// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -x c++ -std=c++11 -triple nvptx64-nvidia-cuda -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix=CHECK2
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -x c++ -std=c++11 -triple nvptx64-nvidia-cuda -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -emit-pch -o %t
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -x c++ -std=c++11 -triple nvptx64-nvidia-cuda -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -o - | FileCheck %s --check-prefix=CHECK3
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -x c++ -std=c++11 -triple nvptx64-nvidia-cuda -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -o - | FileCheck %s --check-prefix=CHECK3

// expected-no-diagnostics
#ifndef HEADER
#define HEADER

template <typename T> // T is deduced as the lambda's closure type at each call site.
int foo(const T &t) { // Invokes callable `t` inside a 'target parallel' region; always returns 0.
#pragma omp target parallel
  t();
  return 0; // Result of t() is intentionally discarded; only the offload codegen is under test.
}
22
23 struct S {
24 int a = 15;
fooS25 int foo() {
26 auto &&L = [&]() { return a; };
27 #pragma omp target
28 L();
29 #pragma omp target parallel
30 L();
31 return a + ::foo(L);
32 }
33 } s;
34
main(int argc,char ** argv)35 int main(int argc, char **argv) {
36 int &b = argc;
37 int &&c = 1;
38 int *d = &argc;
39 int a;
40 auto &&L = [&]() { return argc + b + c + reinterpret_cast<long int>(d) + a; };
41 #pragma omp target firstprivate(argc) map(to : a)
42 L();
43 #pragma omp target parallel
44 L();
45 return argc + s.foo();
46 }

#endif // HEADER
49 // CHECK1-LABEL: define {{[^@]+}}@main
50 // CHECK1-SAME: (i32 noundef signext [[ARGC:%.*]], i8** noundef [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] {
51 // CHECK1-NEXT: entry:
52 // CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
53 // CHECK1-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
54 // CHECK1-NEXT: [[ARGV_ADDR:%.*]] = alloca i8**, align 8
55 // CHECK1-NEXT: [[B:%.*]] = alloca i32*, align 8
56 // CHECK1-NEXT: [[C:%.*]] = alloca i32*, align 8
57 // CHECK1-NEXT: [[REF_TMP:%.*]] = alloca i32, align 4
58 // CHECK1-NEXT: [[D:%.*]] = alloca i32*, align 8
59 // CHECK1-NEXT: [[A:%.*]] = alloca i32, align 4
60 // CHECK1-NEXT: [[L:%.*]] = alloca %class.anon*, align 8
61 // CHECK1-NEXT: [[REF_TMP1:%.*]] = alloca [[CLASS_ANON:%.*]], align 8
62 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32*, align 8
63 // CHECK1-NEXT: [[_TMP2:%.*]] = alloca i32*, align 8
64 // CHECK1-NEXT: [[_TMP3:%.*]] = alloca %class.anon*, align 8
65 // CHECK1-NEXT: [[ARGC_CASTED:%.*]] = alloca i64, align 8
66 // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [11 x i8*], align 8
67 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [11 x i8*], align 8
68 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [11 x i8*], align 8
69 // CHECK1-NEXT: [[_TMP4:%.*]] = alloca i32*, align 8
70 // CHECK1-NEXT: [[_TMP5:%.*]] = alloca i32*, align 8
71 // CHECK1-NEXT: [[_TMP6:%.*]] = alloca %class.anon*, align 8
72 // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS7:%.*]] = alloca [11 x i8*], align 8
73 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS8:%.*]] = alloca [11 x i8*], align 8
74 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS9:%.*]] = alloca [11 x i8*], align 8
75 // CHECK1-NEXT: store i32 0, i32* [[RETVAL]], align 4
76 // CHECK1-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
77 // CHECK1-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8
78 // CHECK1-NEXT: store i32* [[ARGC_ADDR]], i32** [[B]], align 8
79 // CHECK1-NEXT: store i32 1, i32* [[REF_TMP]], align 4
80 // CHECK1-NEXT: store i32* [[REF_TMP]], i32** [[C]], align 8
81 // CHECK1-NEXT: store i32* [[ARGC_ADDR]], i32** [[D]], align 8
82 // CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP1]], i32 0, i32 0
83 // CHECK1-NEXT: store i32* [[ARGC_ADDR]], i32** [[TMP0]], align 8
84 // CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP1]], i32 0, i32 1
85 // CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[B]], align 8
86 // CHECK1-NEXT: store i32* [[TMP2]], i32** [[TMP1]], align 8
87 // CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP1]], i32 0, i32 2
88 // CHECK1-NEXT: [[TMP4:%.*]] = load i32*, i32** [[C]], align 8
89 // CHECK1-NEXT: store i32* [[TMP4]], i32** [[TMP3]], align 8
90 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP1]], i32 0, i32 3
91 // CHECK1-NEXT: store i32** [[D]], i32*** [[TMP5]], align 8
92 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP1]], i32 0, i32 4
93 // CHECK1-NEXT: store i32* [[A]], i32** [[TMP6]], align 8
94 // CHECK1-NEXT: store %class.anon* [[REF_TMP1]], %class.anon** [[L]], align 8
95 // CHECK1-NEXT: [[TMP7:%.*]] = load i32*, i32** [[B]], align 8
96 // CHECK1-NEXT: store i32* [[TMP7]], i32** [[TMP]], align 8
97 // CHECK1-NEXT: [[TMP8:%.*]] = load i32*, i32** [[C]], align 8
98 // CHECK1-NEXT: store i32* [[TMP8]], i32** [[_TMP2]], align 8
99 // CHECK1-NEXT: [[TMP9:%.*]] = load %class.anon*, %class.anon** [[L]], align 8
100 // CHECK1-NEXT: store %class.anon* [[TMP9]], %class.anon** [[_TMP3]], align 8
101 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
102 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[ARGC_CASTED]] to i32*
103 // CHECK1-NEXT: store i32 [[TMP10]], i32* [[CONV]], align 4
104 // CHECK1-NEXT: [[TMP11:%.*]] = load i64, i64* [[ARGC_CASTED]], align 8
105 // CHECK1-NEXT: [[TMP12:%.*]] = load i32*, i32** [[TMP]], align 8
106 // CHECK1-NEXT: [[TMP13:%.*]] = load i32*, i32** [[_TMP2]], align 8
107 // CHECK1-NEXT: [[TMP14:%.*]] = load i32*, i32** [[D]], align 8
108 // CHECK1-NEXT: [[TMP15:%.*]] = load %class.anon*, %class.anon** [[_TMP3]], align 8
109 // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP15]], i32 0, i32 0
110 // CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP15]], i32 0, i32 0
111 // CHECK1-NEXT: [[TMP18:%.*]] = load i32*, i32** [[TMP17]], align 8
112 // CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP15]], i32 0, i32 1
113 // CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP15]], i32 0, i32 1
114 // CHECK1-NEXT: [[TMP21:%.*]] = load i32*, i32** [[TMP20]], align 8
115 // CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP15]], i32 0, i32 2
116 // CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP15]], i32 0, i32 2
117 // CHECK1-NEXT: [[TMP24:%.*]] = load i32*, i32** [[TMP23]], align 8
118 // CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP15]], i32 0, i32 3
119 // CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP15]], i32 0, i32 3
120 // CHECK1-NEXT: [[TMP27:%.*]] = load i32**, i32*** [[TMP26]], align 8
121 // CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP15]], i32 0, i32 4
122 // CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP15]], i32 0, i32 4
123 // CHECK1-NEXT: [[TMP30:%.*]] = load i32*, i32** [[TMP29]], align 8
124 // CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
125 // CHECK1-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i64*
126 // CHECK1-NEXT: store i64 [[TMP11]], i64* [[TMP32]], align 8
127 // CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
128 // CHECK1-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i64*
129 // CHECK1-NEXT: store i64 [[TMP11]], i64* [[TMP34]], align 8
130 // CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
131 // CHECK1-NEXT: store i8* null, i8** [[TMP35]], align 8
132 // CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
133 // CHECK1-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i32**
134 // CHECK1-NEXT: store i32* [[TMP12]], i32** [[TMP37]], align 8
135 // CHECK1-NEXT: [[TMP38:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
136 // CHECK1-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32**
137 // CHECK1-NEXT: store i32* [[TMP12]], i32** [[TMP39]], align 8
138 // CHECK1-NEXT: [[TMP40:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
139 // CHECK1-NEXT: store i8* null, i8** [[TMP40]], align 8
140 // CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
141 // CHECK1-NEXT: [[TMP42:%.*]] = bitcast i8** [[TMP41]] to i32**
142 // CHECK1-NEXT: store i32* [[TMP13]], i32** [[TMP42]], align 8
143 // CHECK1-NEXT: [[TMP43:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
144 // CHECK1-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32**
145 // CHECK1-NEXT: store i32* [[TMP13]], i32** [[TMP44]], align 8
146 // CHECK1-NEXT: [[TMP45:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
147 // CHECK1-NEXT: store i8* null, i8** [[TMP45]], align 8
148 // CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
149 // CHECK1-NEXT: [[TMP47:%.*]] = bitcast i8** [[TMP46]] to i32**
150 // CHECK1-NEXT: store i32* [[TMP14]], i32** [[TMP47]], align 8
151 // CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
152 // CHECK1-NEXT: [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32**
153 // CHECK1-NEXT: store i32* [[TMP14]], i32** [[TMP49]], align 8
154 // CHECK1-NEXT: [[TMP50:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
155 // CHECK1-NEXT: store i8* null, i8** [[TMP50]], align 8
156 // CHECK1-NEXT: [[TMP51:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
157 // CHECK1-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32**
158 // CHECK1-NEXT: store i32* [[A]], i32** [[TMP52]], align 8
159 // CHECK1-NEXT: [[TMP53:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
160 // CHECK1-NEXT: [[TMP54:%.*]] = bitcast i8** [[TMP53]] to i32**
161 // CHECK1-NEXT: store i32* [[A]], i32** [[TMP54]], align 8
162 // CHECK1-NEXT: [[TMP55:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4
163 // CHECK1-NEXT: store i8* null, i8** [[TMP55]], align 8
164 // CHECK1-NEXT: [[TMP56:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5
165 // CHECK1-NEXT: [[TMP57:%.*]] = bitcast i8** [[TMP56]] to %class.anon**
166 // CHECK1-NEXT: store %class.anon* [[TMP15]], %class.anon** [[TMP57]], align 8
167 // CHECK1-NEXT: [[TMP58:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5
168 // CHECK1-NEXT: [[TMP59:%.*]] = bitcast i8** [[TMP58]] to %class.anon**
169 // CHECK1-NEXT: store %class.anon* [[TMP15]], %class.anon** [[TMP59]], align 8
170 // CHECK1-NEXT: [[TMP60:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 5
171 // CHECK1-NEXT: store i8* null, i8** [[TMP60]], align 8
172 // CHECK1-NEXT: [[TMP61:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 6
173 // CHECK1-NEXT: [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i32***
174 // CHECK1-NEXT: store i32** [[TMP16]], i32*** [[TMP62]], align 8
175 // CHECK1-NEXT: [[TMP63:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 6
176 // CHECK1-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i32**
177 // CHECK1-NEXT: store i32* [[TMP18]], i32** [[TMP64]], align 8
178 // CHECK1-NEXT: [[TMP65:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 6
179 // CHECK1-NEXT: store i8* null, i8** [[TMP65]], align 8
180 // CHECK1-NEXT: [[TMP66:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 7
181 // CHECK1-NEXT: [[TMP67:%.*]] = bitcast i8** [[TMP66]] to i32***
182 // CHECK1-NEXT: store i32** [[TMP19]], i32*** [[TMP67]], align 8
183 // CHECK1-NEXT: [[TMP68:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 7
184 // CHECK1-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i32**
185 // CHECK1-NEXT: store i32* [[TMP21]], i32** [[TMP69]], align 8
186 // CHECK1-NEXT: [[TMP70:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 7
187 // CHECK1-NEXT: store i8* null, i8** [[TMP70]], align 8
188 // CHECK1-NEXT: [[TMP71:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 8
189 // CHECK1-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i32***
190 // CHECK1-NEXT: store i32** [[TMP22]], i32*** [[TMP72]], align 8
191 // CHECK1-NEXT: [[TMP73:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 8
192 // CHECK1-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32**
193 // CHECK1-NEXT: store i32* [[TMP24]], i32** [[TMP74]], align 8
194 // CHECK1-NEXT: [[TMP75:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 8
195 // CHECK1-NEXT: store i8* null, i8** [[TMP75]], align 8
196 // CHECK1-NEXT: [[TMP76:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 9
197 // CHECK1-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i32****
198 // CHECK1-NEXT: store i32*** [[TMP25]], i32**** [[TMP77]], align 8
199 // CHECK1-NEXT: [[TMP78:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 9
200 // CHECK1-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32***
201 // CHECK1-NEXT: store i32** [[TMP27]], i32*** [[TMP79]], align 8
202 // CHECK1-NEXT: [[TMP80:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 9
203 // CHECK1-NEXT: store i8* null, i8** [[TMP80]], align 8
204 // CHECK1-NEXT: [[TMP81:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 10
205 // CHECK1-NEXT: [[TMP82:%.*]] = bitcast i8** [[TMP81]] to i32***
206 // CHECK1-NEXT: store i32** [[TMP28]], i32*** [[TMP82]], align 8
207 // CHECK1-NEXT: [[TMP83:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 10
208 // CHECK1-NEXT: [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32**
209 // CHECK1-NEXT: store i32* [[TMP30]], i32** [[TMP84]], align 8
210 // CHECK1-NEXT: [[TMP85:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 10
211 // CHECK1-NEXT: store i8* null, i8** [[TMP85]], align 8
212 // CHECK1-NEXT: [[TMP86:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
213 // CHECK1-NEXT: [[TMP87:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
214 // CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
215 // CHECK1-NEXT: [[TMP88:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
216 // CHECK1-NEXT: store i32 1, i32* [[TMP88]], align 4
217 // CHECK1-NEXT: [[TMP89:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
218 // CHECK1-NEXT: store i32 11, i32* [[TMP89]], align 4
219 // CHECK1-NEXT: [[TMP90:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
220 // CHECK1-NEXT: store i8** [[TMP86]], i8*** [[TMP90]], align 8
221 // CHECK1-NEXT: [[TMP91:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
222 // CHECK1-NEXT: store i8** [[TMP87]], i8*** [[TMP91]], align 8
223 // CHECK1-NEXT: [[TMP92:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
224 // CHECK1-NEXT: store i64* getelementptr inbounds ([11 x i64], [11 x i64]* @.offload_sizes, i32 0, i32 0), i64** [[TMP92]], align 8
225 // CHECK1-NEXT: [[TMP93:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
226 // CHECK1-NEXT: store i64* getelementptr inbounds ([11 x i64], [11 x i64]* @.offload_maptypes, i32 0, i32 0), i64** [[TMP93]], align 8
227 // CHECK1-NEXT: [[TMP94:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
228 // CHECK1-NEXT: store i8** null, i8*** [[TMP94]], align 8
229 // CHECK1-NEXT: [[TMP95:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
230 // CHECK1-NEXT: store i8** null, i8*** [[TMP95]], align 8
231 // CHECK1-NEXT: [[TMP96:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
232 // CHECK1-NEXT: store i64 0, i64* [[TMP96]], align 8
233 // CHECK1-NEXT: [[TMP97:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l41.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
234 // CHECK1-NEXT: [[TMP98:%.*]] = icmp ne i32 [[TMP97]], 0
235 // CHECK1-NEXT: br i1 [[TMP98]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
236 // CHECK1: omp_offload.failed:
237 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l41(i64 [[TMP11]], i32* [[TMP12]], i32* [[TMP13]], i32* [[TMP14]], i32* [[A]], %class.anon* [[TMP15]]) #[[ATTR4:[0-9]+]]
238 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
239 // CHECK1: omp_offload.cont:
240 // CHECK1-NEXT: [[TMP99:%.*]] = load i32*, i32** [[B]], align 8
241 // CHECK1-NEXT: store i32* [[TMP99]], i32** [[_TMP4]], align 8
242 // CHECK1-NEXT: [[TMP100:%.*]] = load i32*, i32** [[C]], align 8
243 // CHECK1-NEXT: store i32* [[TMP100]], i32** [[_TMP5]], align 8
244 // CHECK1-NEXT: [[TMP101:%.*]] = load %class.anon*, %class.anon** [[L]], align 8
245 // CHECK1-NEXT: store %class.anon* [[TMP101]], %class.anon** [[_TMP6]], align 8
246 // CHECK1-NEXT: [[TMP102:%.*]] = load i32*, i32** [[_TMP4]], align 8
247 // CHECK1-NEXT: [[TMP103:%.*]] = load i32*, i32** [[_TMP5]], align 8
248 // CHECK1-NEXT: [[TMP104:%.*]] = load i32*, i32** [[D]], align 8
249 // CHECK1-NEXT: [[TMP105:%.*]] = load %class.anon*, %class.anon** [[_TMP6]], align 8
250 // CHECK1-NEXT: [[TMP106:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP105]], i32 0, i32 0
251 // CHECK1-NEXT: [[TMP107:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP105]], i32 0, i32 0
252 // CHECK1-NEXT: [[TMP108:%.*]] = load i32*, i32** [[TMP107]], align 8
253 // CHECK1-NEXT: [[TMP109:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP105]], i32 0, i32 1
254 // CHECK1-NEXT: [[TMP110:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP105]], i32 0, i32 1
255 // CHECK1-NEXT: [[TMP111:%.*]] = load i32*, i32** [[TMP110]], align 8
256 // CHECK1-NEXT: [[TMP112:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP105]], i32 0, i32 2
257 // CHECK1-NEXT: [[TMP113:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP105]], i32 0, i32 2
258 // CHECK1-NEXT: [[TMP114:%.*]] = load i32*, i32** [[TMP113]], align 8
259 // CHECK1-NEXT: [[TMP115:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP105]], i32 0, i32 3
260 // CHECK1-NEXT: [[TMP116:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP105]], i32 0, i32 3
261 // CHECK1-NEXT: [[TMP117:%.*]] = load i32**, i32*** [[TMP116]], align 8
262 // CHECK1-NEXT: [[TMP118:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP105]], i32 0, i32 4
263 // CHECK1-NEXT: [[TMP119:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP105]], i32 0, i32 4
264 // CHECK1-NEXT: [[TMP120:%.*]] = load i32*, i32** [[TMP119]], align 8
265 // CHECK1-NEXT: [[TMP121:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
266 // CHECK1-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to i32**
267 // CHECK1-NEXT: store i32* [[ARGC_ADDR]], i32** [[TMP122]], align 8
268 // CHECK1-NEXT: [[TMP123:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
269 // CHECK1-NEXT: [[TMP124:%.*]] = bitcast i8** [[TMP123]] to i32**
270 // CHECK1-NEXT: store i32* [[ARGC_ADDR]], i32** [[TMP124]], align 8
271 // CHECK1-NEXT: [[TMP125:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 0
272 // CHECK1-NEXT: store i8* null, i8** [[TMP125]], align 8
273 // CHECK1-NEXT: [[TMP126:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 1
274 // CHECK1-NEXT: [[TMP127:%.*]] = bitcast i8** [[TMP126]] to i32**
275 // CHECK1-NEXT: store i32* [[TMP102]], i32** [[TMP127]], align 8
276 // CHECK1-NEXT: [[TMP128:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 1
277 // CHECK1-NEXT: [[TMP129:%.*]] = bitcast i8** [[TMP128]] to i32**
278 // CHECK1-NEXT: store i32* [[TMP102]], i32** [[TMP129]], align 8
279 // CHECK1-NEXT: [[TMP130:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 1
280 // CHECK1-NEXT: store i8* null, i8** [[TMP130]], align 8
281 // CHECK1-NEXT: [[TMP131:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 2
282 // CHECK1-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to i32**
283 // CHECK1-NEXT: store i32* [[TMP103]], i32** [[TMP132]], align 8
284 // CHECK1-NEXT: [[TMP133:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 2
285 // CHECK1-NEXT: [[TMP134:%.*]] = bitcast i8** [[TMP133]] to i32**
286 // CHECK1-NEXT: store i32* [[TMP103]], i32** [[TMP134]], align 8
287 // CHECK1-NEXT: [[TMP135:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 2
288 // CHECK1-NEXT: store i8* null, i8** [[TMP135]], align 8
289 // CHECK1-NEXT: [[TMP136:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 3
290 // CHECK1-NEXT: [[TMP137:%.*]] = bitcast i8** [[TMP136]] to i32**
291 // CHECK1-NEXT: store i32* [[TMP104]], i32** [[TMP137]], align 8
292 // CHECK1-NEXT: [[TMP138:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 3
293 // CHECK1-NEXT: [[TMP139:%.*]] = bitcast i8** [[TMP138]] to i32**
294 // CHECK1-NEXT: store i32* [[TMP104]], i32** [[TMP139]], align 8
295 // CHECK1-NEXT: [[TMP140:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 3
296 // CHECK1-NEXT: store i8* null, i8** [[TMP140]], align 8
297 // CHECK1-NEXT: [[TMP141:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 4
298 // CHECK1-NEXT: [[TMP142:%.*]] = bitcast i8** [[TMP141]] to i32**
299 // CHECK1-NEXT: store i32* [[A]], i32** [[TMP142]], align 8
300 // CHECK1-NEXT: [[TMP143:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 4
301 // CHECK1-NEXT: [[TMP144:%.*]] = bitcast i8** [[TMP143]] to i32**
302 // CHECK1-NEXT: store i32* [[A]], i32** [[TMP144]], align 8
303 // CHECK1-NEXT: [[TMP145:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 4
304 // CHECK1-NEXT: store i8* null, i8** [[TMP145]], align 8
305 // CHECK1-NEXT: [[TMP146:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 5
306 // CHECK1-NEXT: [[TMP147:%.*]] = bitcast i8** [[TMP146]] to %class.anon**
307 // CHECK1-NEXT: store %class.anon* [[TMP105]], %class.anon** [[TMP147]], align 8
308 // CHECK1-NEXT: [[TMP148:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 5
309 // CHECK1-NEXT: [[TMP149:%.*]] = bitcast i8** [[TMP148]] to %class.anon**
310 // CHECK1-NEXT: store %class.anon* [[TMP105]], %class.anon** [[TMP149]], align 8
311 // CHECK1-NEXT: [[TMP150:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 5
312 // CHECK1-NEXT: store i8* null, i8** [[TMP150]], align 8
313 // CHECK1-NEXT: [[TMP151:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 6
314 // CHECK1-NEXT: [[TMP152:%.*]] = bitcast i8** [[TMP151]] to i32***
315 // CHECK1-NEXT: store i32** [[TMP106]], i32*** [[TMP152]], align 8
316 // CHECK1-NEXT: [[TMP153:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 6
317 // CHECK1-NEXT: [[TMP154:%.*]] = bitcast i8** [[TMP153]] to i32**
318 // CHECK1-NEXT: store i32* [[TMP108]], i32** [[TMP154]], align 8
319 // CHECK1-NEXT: [[TMP155:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 6
320 // CHECK1-NEXT: store i8* null, i8** [[TMP155]], align 8
321 // CHECK1-NEXT: [[TMP156:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 7
322 // CHECK1-NEXT: [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i32***
323 // CHECK1-NEXT: store i32** [[TMP109]], i32*** [[TMP157]], align 8
324 // CHECK1-NEXT: [[TMP158:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 7
325 // CHECK1-NEXT: [[TMP159:%.*]] = bitcast i8** [[TMP158]] to i32**
326 // CHECK1-NEXT: store i32* [[TMP111]], i32** [[TMP159]], align 8
327 // CHECK1-NEXT: [[TMP160:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 7
328 // CHECK1-NEXT: store i8* null, i8** [[TMP160]], align 8
329 // CHECK1-NEXT: [[TMP161:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 8
330 // CHECK1-NEXT: [[TMP162:%.*]] = bitcast i8** [[TMP161]] to i32***
331 // CHECK1-NEXT: store i32** [[TMP112]], i32*** [[TMP162]], align 8
332 // CHECK1-NEXT: [[TMP163:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 8
333 // CHECK1-NEXT: [[TMP164:%.*]] = bitcast i8** [[TMP163]] to i32**
334 // CHECK1-NEXT: store i32* [[TMP114]], i32** [[TMP164]], align 8
335 // CHECK1-NEXT: [[TMP165:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 8
336 // CHECK1-NEXT: store i8* null, i8** [[TMP165]], align 8
337 // CHECK1-NEXT: [[TMP166:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 9
338 // CHECK1-NEXT: [[TMP167:%.*]] = bitcast i8** [[TMP166]] to i32****
339 // CHECK1-NEXT: store i32*** [[TMP115]], i32**** [[TMP167]], align 8
340 // CHECK1-NEXT: [[TMP168:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 9
341 // CHECK1-NEXT: [[TMP169:%.*]] = bitcast i8** [[TMP168]] to i32***
342 // CHECK1-NEXT: store i32** [[TMP117]], i32*** [[TMP169]], align 8
343 // CHECK1-NEXT: [[TMP170:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 9
344 // CHECK1-NEXT: store i8* null, i8** [[TMP170]], align 8
345 // CHECK1-NEXT: [[TMP171:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 10
346 // CHECK1-NEXT: [[TMP172:%.*]] = bitcast i8** [[TMP171]] to i32***
347 // CHECK1-NEXT: store i32** [[TMP118]], i32*** [[TMP172]], align 8
348 // CHECK1-NEXT: [[TMP173:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 10
349 // CHECK1-NEXT: [[TMP174:%.*]] = bitcast i8** [[TMP173]] to i32**
350 // CHECK1-NEXT: store i32* [[TMP120]], i32** [[TMP174]], align 8
351 // CHECK1-NEXT: [[TMP175:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 10
352 // CHECK1-NEXT: store i8* null, i8** [[TMP175]], align 8
353 // CHECK1-NEXT: [[TMP176:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
354 // CHECK1-NEXT: [[TMP177:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
355 // CHECK1-NEXT: [[KERNEL_ARGS10:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
356 // CHECK1-NEXT: [[TMP178:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 0
357 // CHECK1-NEXT: store i32 1, i32* [[TMP178]], align 4
358 // CHECK1-NEXT: [[TMP179:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 1
359 // CHECK1-NEXT: store i32 11, i32* [[TMP179]], align 4
360 // CHECK1-NEXT: [[TMP180:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 2
361 // CHECK1-NEXT: store i8** [[TMP176]], i8*** [[TMP180]], align 8
362 // CHECK1-NEXT: [[TMP181:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 3
363 // CHECK1-NEXT: store i8** [[TMP177]], i8*** [[TMP181]], align 8
364 // CHECK1-NEXT: [[TMP182:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 4
365 // CHECK1-NEXT: store i64* getelementptr inbounds ([11 x i64], [11 x i64]* @.offload_sizes.1, i32 0, i32 0), i64** [[TMP182]], align 8
366 // CHECK1-NEXT: [[TMP183:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 5
367 // CHECK1-NEXT: store i64* getelementptr inbounds ([11 x i64], [11 x i64]* @.offload_maptypes.2, i32 0, i32 0), i64** [[TMP183]], align 8
368 // CHECK1-NEXT: [[TMP184:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 6
369 // CHECK1-NEXT: store i8** null, i8*** [[TMP184]], align 8
370 // CHECK1-NEXT: [[TMP185:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 7
371 // CHECK1-NEXT: store i8** null, i8*** [[TMP185]], align 8
372 // CHECK1-NEXT: [[TMP186:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 8
373 // CHECK1-NEXT: store i64 0, i64* [[TMP186]], align 8
374 // CHECK1-NEXT: [[TMP187:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l43.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]])
375 // CHECK1-NEXT: [[TMP188:%.*]] = icmp ne i32 [[TMP187]], 0
376 // CHECK1-NEXT: br i1 [[TMP188]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]]
377 // CHECK1: omp_offload.failed11:
378 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l43(i32* [[ARGC_ADDR]], i32* [[TMP102]], i32* [[TMP103]], i32* [[TMP104]], i32* [[A]], %class.anon* [[TMP105]]) #[[ATTR4]]
379 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT12]]
380 // CHECK1: omp_offload.cont12:
381 // CHECK1-NEXT: [[TMP189:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
382 // CHECK1-NEXT: [[CALL:%.*]] = call noundef signext i32 @_ZN1S3fooEv(%struct.S* noundef nonnull align 4 dereferenceable(4) @s)
383 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP189]], [[CALL]]
384 // CHECK1-NEXT: ret i32 [[ADD]]
385 //
386 //
387 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l41
388 // CHECK1-SAME: (i64 noundef [[ARGC:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[B:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[C:%.*]], i32* noundef [[D:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], %class.anon* noundef nonnull align 8 dereferenceable(40) [[L:%.*]]) #[[ATTR1:[0-9]+]] {
389 // CHECK1-NEXT: entry:
390 // CHECK1-NEXT: [[ARGC_ADDR:%.*]] = alloca i64, align 8
391 // CHECK1-NEXT: [[B_ADDR:%.*]] = alloca i32*, align 8
392 // CHECK1-NEXT: [[C_ADDR:%.*]] = alloca i32*, align 8
393 // CHECK1-NEXT: [[D_ADDR:%.*]] = alloca i32*, align 8
394 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
395 // CHECK1-NEXT: [[L_ADDR:%.*]] = alloca %class.anon*, align 8
396 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32*, align 8
397 // CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32*, align 8
398 // CHECK1-NEXT: [[_TMP2:%.*]] = alloca %class.anon*, align 8
399 // CHECK1-NEXT: [[L3:%.*]] = alloca [[CLASS_ANON:%.*]], align 8
400 // CHECK1-NEXT: [[_TMP4:%.*]] = alloca %class.anon*, align 8
401 // CHECK1-NEXT: [[B5:%.*]] = alloca i32, align 4
402 // CHECK1-NEXT: [[_TMP6:%.*]] = alloca i32*, align 8
403 // CHECK1-NEXT: [[C7:%.*]] = alloca i32, align 4
404 // CHECK1-NEXT: [[_TMP8:%.*]] = alloca i32*, align 8
405 // CHECK1-NEXT: store i64 [[ARGC]], i64* [[ARGC_ADDR]], align 8
406 // CHECK1-NEXT: store i32* [[B]], i32** [[B_ADDR]], align 8
407 // CHECK1-NEXT: store i32* [[C]], i32** [[C_ADDR]], align 8
408 // CHECK1-NEXT: store i32* [[D]], i32** [[D_ADDR]], align 8
409 // CHECK1-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
410 // CHECK1-NEXT: store %class.anon* [[L]], %class.anon** [[L_ADDR]], align 8
411 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[ARGC_ADDR]] to i32*
412 // CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[B_ADDR]], align 8
413 // CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[C_ADDR]], align 8
414 // CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[A_ADDR]], align 8
415 // CHECK1-NEXT: [[TMP3:%.*]] = load %class.anon*, %class.anon** [[L_ADDR]], align 8
416 // CHECK1-NEXT: store i32* [[TMP0]], i32** [[TMP]], align 8
417 // CHECK1-NEXT: store i32* [[TMP1]], i32** [[_TMP1]], align 8
418 // CHECK1-NEXT: store %class.anon* [[TMP3]], %class.anon** [[_TMP2]], align 8
419 // CHECK1-NEXT: [[TMP4:%.*]] = load %class.anon*, %class.anon** [[_TMP2]], align 8
420 // CHECK1-NEXT: [[TMP5:%.*]] = bitcast %class.anon* [[L3]] to i8*
421 // CHECK1-NEXT: [[TMP6:%.*]] = bitcast %class.anon* [[TMP4]] to i8*
422 // CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP5]], i8* align 8 [[TMP6]], i64 40, i1 false)
423 // CHECK1-NEXT: store %class.anon* [[L3]], %class.anon** [[_TMP4]], align 8
424 // CHECK1-NEXT: [[TMP7:%.*]] = load i32*, i32** [[TMP]], align 8
425 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
426 // CHECK1-NEXT: store i32 [[TMP8]], i32* [[B5]], align 4
427 // CHECK1-NEXT: store i32* [[B5]], i32** [[_TMP6]], align 8
428 // CHECK1-NEXT: [[TMP9:%.*]] = load i32*, i32** [[_TMP1]], align 8
429 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
430 // CHECK1-NEXT: store i32 [[TMP10]], i32* [[C7]], align 4
431 // CHECK1-NEXT: store i32* [[C7]], i32** [[_TMP8]], align 8
432 // CHECK1-NEXT: [[TMP11:%.*]] = load %class.anon*, %class.anon** [[_TMP4]], align 8
433 // CHECK1-NEXT: [[CALL:%.*]] = call noundef i64 @"_ZZ4mainENK3$_0clEv"(%class.anon* noundef nonnull align 8 dereferenceable(40) [[TMP11]])
434 // CHECK1-NEXT: ret void
435 //
436 //
437 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l43
438 // CHECK1-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[ARGC:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[B:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[C:%.*]], i32* noundef [[D:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], %class.anon* noundef nonnull align 8 dereferenceable(40) [[L:%.*]]) #[[ATTR5:[0-9]+]] {
439 // CHECK1-NEXT: entry:
440 // CHECK1-NEXT: [[ARGC_ADDR:%.*]] = alloca i32*, align 8
441 // CHECK1-NEXT: [[B_ADDR:%.*]] = alloca i32*, align 8
442 // CHECK1-NEXT: [[C_ADDR:%.*]] = alloca i32*, align 8
443 // CHECK1-NEXT: [[D_ADDR:%.*]] = alloca i32*, align 8
444 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
445 // CHECK1-NEXT: [[L_ADDR:%.*]] = alloca %class.anon*, align 8
446 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32*, align 8
447 // CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32*, align 8
448 // CHECK1-NEXT: [[_TMP2:%.*]] = alloca %class.anon*, align 8
449 // CHECK1-NEXT: store i32* [[ARGC]], i32** [[ARGC_ADDR]], align 8
450 // CHECK1-NEXT: store i32* [[B]], i32** [[B_ADDR]], align 8
451 // CHECK1-NEXT: store i32* [[C]], i32** [[C_ADDR]], align 8
452 // CHECK1-NEXT: store i32* [[D]], i32** [[D_ADDR]], align 8
453 // CHECK1-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
454 // CHECK1-NEXT: store %class.anon* [[L]], %class.anon** [[L_ADDR]], align 8
455 // CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[ARGC_ADDR]], align 8
456 // CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[B_ADDR]], align 8
457 // CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[C_ADDR]], align 8
458 // CHECK1-NEXT: [[TMP3:%.*]] = load i32*, i32** [[A_ADDR]], align 8
459 // CHECK1-NEXT: [[TMP4:%.*]] = load %class.anon*, %class.anon** [[L_ADDR]], align 8
460 // CHECK1-NEXT: store i32* [[TMP1]], i32** [[TMP]], align 8
461 // CHECK1-NEXT: store i32* [[TMP2]], i32** [[_TMP1]], align 8
462 // CHECK1-NEXT: store %class.anon* [[TMP4]], %class.anon** [[_TMP2]], align 8
463 // CHECK1-NEXT: [[TMP5:%.*]] = load i32*, i32** [[TMP]], align 8
464 // CHECK1-NEXT: [[TMP6:%.*]] = load i32*, i32** [[_TMP1]], align 8
465 // CHECK1-NEXT: [[TMP7:%.*]] = load i32*, i32** [[D_ADDR]], align 8
466 // CHECK1-NEXT: [[TMP8:%.*]] = load %class.anon*, %class.anon** [[_TMP2]], align 8
467 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, i32*, i32*, i32*, %class.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[TMP0]], i32* [[TMP5]], i32* [[TMP6]], i32* [[TMP7]], i32* [[TMP3]], %class.anon* [[TMP8]])
468 // CHECK1-NEXT: ret void
469 //
470 //
471 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
472 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[ARGC:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[B:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[C:%.*]], i32* noundef [[D:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], %class.anon* noundef nonnull align 8 dereferenceable(40) [[L:%.*]]) #[[ATTR1]] {
473 // CHECK1-NEXT: entry:
474 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
475 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
476 // CHECK1-NEXT: [[ARGC_ADDR:%.*]] = alloca i32*, align 8
477 // CHECK1-NEXT: [[B_ADDR:%.*]] = alloca i32*, align 8
478 // CHECK1-NEXT: [[C_ADDR:%.*]] = alloca i32*, align 8
479 // CHECK1-NEXT: [[D_ADDR:%.*]] = alloca i32*, align 8
480 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
481 // CHECK1-NEXT: [[L_ADDR:%.*]] = alloca %class.anon*, align 8
482 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32*, align 8
483 // CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32*, align 8
484 // CHECK1-NEXT: [[_TMP2:%.*]] = alloca %class.anon*, align 8
485 // CHECK1-NEXT: [[L3:%.*]] = alloca [[CLASS_ANON:%.*]], align 8
486 // CHECK1-NEXT: [[_TMP4:%.*]] = alloca %class.anon*, align 8
487 // CHECK1-NEXT: [[ARGC5:%.*]] = alloca i32, align 4
488 // CHECK1-NEXT: [[B6:%.*]] = alloca i32, align 4
489 // CHECK1-NEXT: [[_TMP7:%.*]] = alloca i32*, align 8
490 // CHECK1-NEXT: [[C8:%.*]] = alloca i32, align 4
491 // CHECK1-NEXT: [[_TMP9:%.*]] = alloca i32*, align 8
492 // CHECK1-NEXT: [[A10:%.*]] = alloca i32, align 4
493 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
494 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
495 // CHECK1-NEXT: store i32* [[ARGC]], i32** [[ARGC_ADDR]], align 8
496 // CHECK1-NEXT: store i32* [[B]], i32** [[B_ADDR]], align 8
497 // CHECK1-NEXT: store i32* [[C]], i32** [[C_ADDR]], align 8
498 // CHECK1-NEXT: store i32* [[D]], i32** [[D_ADDR]], align 8
499 // CHECK1-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
500 // CHECK1-NEXT: store %class.anon* [[L]], %class.anon** [[L_ADDR]], align 8
501 // CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[ARGC_ADDR]], align 8
502 // CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[B_ADDR]], align 8
503 // CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[C_ADDR]], align 8
504 // CHECK1-NEXT: [[TMP3:%.*]] = load i32*, i32** [[A_ADDR]], align 8
505 // CHECK1-NEXT: [[TMP4:%.*]] = load %class.anon*, %class.anon** [[L_ADDR]], align 8
506 // CHECK1-NEXT: store i32* [[TMP1]], i32** [[TMP]], align 8
507 // CHECK1-NEXT: store i32* [[TMP2]], i32** [[_TMP1]], align 8
508 // CHECK1-NEXT: store %class.anon* [[TMP4]], %class.anon** [[_TMP2]], align 8
509 // CHECK1-NEXT: [[TMP5:%.*]] = load %class.anon*, %class.anon** [[_TMP2]], align 8
510 // CHECK1-NEXT: [[TMP6:%.*]] = bitcast %class.anon* [[L3]] to i8*
511 // CHECK1-NEXT: [[TMP7:%.*]] = bitcast %class.anon* [[TMP5]] to i8*
512 // CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP6]], i8* align 8 [[TMP7]], i64 40, i1 false)
513 // CHECK1-NEXT: store %class.anon* [[L3]], %class.anon** [[_TMP4]], align 8
514 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP0]], align 4
515 // CHECK1-NEXT: store i32 [[TMP8]], i32* [[ARGC5]], align 4
516 // CHECK1-NEXT: [[TMP9:%.*]] = load i32*, i32** [[TMP]], align 8
517 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
518 // CHECK1-NEXT: store i32 [[TMP10]], i32* [[B6]], align 4
519 // CHECK1-NEXT: store i32* [[B6]], i32** [[_TMP7]], align 8
520 // CHECK1-NEXT: [[TMP11:%.*]] = load i32*, i32** [[_TMP1]], align 8
521 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
522 // CHECK1-NEXT: store i32 [[TMP12]], i32* [[C8]], align 4
523 // CHECK1-NEXT: store i32* [[C8]], i32** [[_TMP9]], align 8
524 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP3]], align 4
525 // CHECK1-NEXT: store i32 [[TMP13]], i32* [[A10]], align 4
526 // CHECK1-NEXT: [[TMP14:%.*]] = load %class.anon*, %class.anon** [[_TMP4]], align 8
527 // CHECK1-NEXT: [[CALL:%.*]] = call noundef i64 @"_ZZ4mainENK3$_0clEv"(%class.anon* noundef nonnull align 8 dereferenceable(40) [[TMP14]])
528 // CHECK1-NEXT: ret void
529 //
530 //
531 // CHECK1-LABEL: define {{[^@]+}}@_ZN1S3fooEv
532 // CHECK1-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) #[[ATTR3:[0-9]+]] comdat align 2 {
533 // CHECK1-NEXT: entry:
534 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
535 // CHECK1-NEXT: [[L:%.*]] = alloca %class.anon.0*, align 8
536 // CHECK1-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8
537 // CHECK1-NEXT: [[TMP:%.*]] = alloca %class.anon.0*, align 8
538 // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
539 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
540 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
541 // CHECK1-NEXT: [[_TMP2:%.*]] = alloca %class.anon.0*, align 8
542 // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [3 x i8*], align 8
543 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS4:%.*]] = alloca [3 x i8*], align 8
544 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [3 x i8*], align 8
545 // CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
546 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
547 // CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0
548 // CHECK1-NEXT: store %struct.S* [[THIS1]], %struct.S** [[TMP0]], align 8
549 // CHECK1-NEXT: store %class.anon.0* [[REF_TMP]], %class.anon.0** [[L]], align 8
550 // CHECK1-NEXT: [[TMP1:%.*]] = load %class.anon.0*, %class.anon.0** [[L]], align 8
551 // CHECK1-NEXT: store %class.anon.0* [[TMP1]], %class.anon.0** [[TMP]], align 8
552 // CHECK1-NEXT: [[TMP2:%.*]] = load %class.anon.0*, %class.anon.0** [[TMP]], align 8
553 // CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[TMP2]], i32 0, i32 0
554 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[TMP2]], i32 0, i32 0
555 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
556 // CHECK1-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to %struct.S**
557 // CHECK1-NEXT: store %struct.S* [[THIS1]], %struct.S** [[TMP6]], align 8
558 // CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
559 // CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to %struct.S**
560 // CHECK1-NEXT: store %struct.S* [[THIS1]], %struct.S** [[TMP8]], align 8
561 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
562 // CHECK1-NEXT: store i8* null, i8** [[TMP9]], align 8
563 // CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
564 // CHECK1-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to %class.anon.0**
565 // CHECK1-NEXT: store %class.anon.0* [[TMP2]], %class.anon.0** [[TMP11]], align 8
566 // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
567 // CHECK1-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to %class.anon.0**
568 // CHECK1-NEXT: store %class.anon.0* [[TMP2]], %class.anon.0** [[TMP13]], align 8
569 // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
570 // CHECK1-NEXT: store i8* null, i8** [[TMP14]], align 8
571 // CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
572 // CHECK1-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to %struct.S***
573 // CHECK1-NEXT: store %struct.S** [[TMP3]], %struct.S*** [[TMP16]], align 8
574 // CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
575 // CHECK1-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to %struct.S***
576 // CHECK1-NEXT: store %struct.S** [[TMP4]], %struct.S*** [[TMP18]], align 8
577 // CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
578 // CHECK1-NEXT: store i8* null, i8** [[TMP19]], align 8
579 // CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
580 // CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
581 // CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
582 // CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
583 // CHECK1-NEXT: store i32 1, i32* [[TMP22]], align 4
584 // CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
585 // CHECK1-NEXT: store i32 3, i32* [[TMP23]], align 4
586 // CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
587 // CHECK1-NEXT: store i8** [[TMP20]], i8*** [[TMP24]], align 8
588 // CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
589 // CHECK1-NEXT: store i8** [[TMP21]], i8*** [[TMP25]], align 8
590 // CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
591 // CHECK1-NEXT: store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.3, i32 0, i32 0), i64** [[TMP26]], align 8
592 // CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
593 // CHECK1-NEXT: store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i64** [[TMP27]], align 8
594 // CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
595 // CHECK1-NEXT: store i8** null, i8*** [[TMP28]], align 8
596 // CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
597 // CHECK1-NEXT: store i8** null, i8*** [[TMP29]], align 8
598 // CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
599 // CHECK1-NEXT: store i64 0, i64* [[TMP30]], align 8
600 // CHECK1-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN1S3fooEv_l27.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
601 // CHECK1-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
602 // CHECK1-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
603 // CHECK1: omp_offload.failed:
604 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN1S3fooEv_l27(%struct.S* [[THIS1]], %class.anon.0* [[TMP2]]) #[[ATTR4]]
605 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
606 // CHECK1: omp_offload.cont:
607 // CHECK1-NEXT: [[TMP33:%.*]] = load %class.anon.0*, %class.anon.0** [[L]], align 8
608 // CHECK1-NEXT: store %class.anon.0* [[TMP33]], %class.anon.0** [[_TMP2]], align 8
609 // CHECK1-NEXT: [[TMP34:%.*]] = load %class.anon.0*, %class.anon.0** [[_TMP2]], align 8
610 // CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[TMP34]], i32 0, i32 0
611 // CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[TMP34]], i32 0, i32 0
612 // CHECK1-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
613 // CHECK1-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to %struct.S**
614 // CHECK1-NEXT: store %struct.S* [[THIS1]], %struct.S** [[TMP38]], align 8
615 // CHECK1-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
616 // CHECK1-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to %struct.S**
617 // CHECK1-NEXT: store %struct.S* [[THIS1]], %struct.S** [[TMP40]], align 8
618 // CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS5]], i64 0, i64 0
619 // CHECK1-NEXT: store i8* null, i8** [[TMP41]], align 8
620 // CHECK1-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 1
621 // CHECK1-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to %class.anon.0**
622 // CHECK1-NEXT: store %class.anon.0* [[TMP34]], %class.anon.0** [[TMP43]], align 8
623 // CHECK1-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 1
624 // CHECK1-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to %class.anon.0**
625 // CHECK1-NEXT: store %class.anon.0* [[TMP34]], %class.anon.0** [[TMP45]], align 8
626 // CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS5]], i64 0, i64 1
627 // CHECK1-NEXT: store i8* null, i8** [[TMP46]], align 8
628 // CHECK1-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 2
629 // CHECK1-NEXT: [[TMP48:%.*]] = bitcast i8** [[TMP47]] to %struct.S***
630 // CHECK1-NEXT: store %struct.S** [[TMP35]], %struct.S*** [[TMP48]], align 8
631 // CHECK1-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 2
632 // CHECK1-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to %struct.S***
633 // CHECK1-NEXT: store %struct.S** [[TMP36]], %struct.S*** [[TMP50]], align 8
634 // CHECK1-NEXT: [[TMP51:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS5]], i64 0, i64 2
635 // CHECK1-NEXT: store i8* null, i8** [[TMP51]], align 8
636 // CHECK1-NEXT: [[TMP52:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
637 // CHECK1-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
638 // CHECK1-NEXT: [[KERNEL_ARGS6:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
639 // CHECK1-NEXT: [[TMP54:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 0
640 // CHECK1-NEXT: store i32 1, i32* [[TMP54]], align 4
641 // CHECK1-NEXT: [[TMP55:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 1
642 // CHECK1-NEXT: store i32 3, i32* [[TMP55]], align 4
643 // CHECK1-NEXT: [[TMP56:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 2
644 // CHECK1-NEXT: store i8** [[TMP52]], i8*** [[TMP56]], align 8
645 // CHECK1-NEXT: [[TMP57:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 3
646 // CHECK1-NEXT: store i8** [[TMP53]], i8*** [[TMP57]], align 8
647 // CHECK1-NEXT: [[TMP58:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 4
648 // CHECK1-NEXT: store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.6, i32 0, i32 0), i64** [[TMP58]], align 8
649 // CHECK1-NEXT: [[TMP59:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 5
650 // CHECK1-NEXT: store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.7, i32 0, i32 0), i64** [[TMP59]], align 8
651 // CHECK1-NEXT: [[TMP60:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 6
652 // CHECK1-NEXT: store i8** null, i8*** [[TMP60]], align 8
653 // CHECK1-NEXT: [[TMP61:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 7
654 // CHECK1-NEXT: store i8** null, i8*** [[TMP61]], align 8
655 // CHECK1-NEXT: [[TMP62:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 8
656 // CHECK1-NEXT: store i64 0, i64* [[TMP62]], align 8
657 // CHECK1-NEXT: [[TMP63:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN1S3fooEv_l29.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]])
658 // CHECK1-NEXT: [[TMP64:%.*]] = icmp ne i32 [[TMP63]], 0
659 // CHECK1-NEXT: br i1 [[TMP64]], label [[OMP_OFFLOAD_FAILED7:%.*]], label [[OMP_OFFLOAD_CONT8:%.*]]
660 // CHECK1: omp_offload.failed7:
661 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN1S3fooEv_l29(%struct.S* [[THIS1]], %class.anon.0* [[TMP34]]) #[[ATTR4]]
662 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT8]]
663 // CHECK1: omp_offload.cont8:
664 // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
665 // CHECK1-NEXT: [[TMP65:%.*]] = load i32, i32* [[A]], align 4
666 // CHECK1-NEXT: [[TMP66:%.*]] = load %class.anon.0*, %class.anon.0** [[L]], align 8
667 // CHECK1-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z3fooIZN1S3fooEvEUlvE_EiRKT_(%class.anon.0* noundef nonnull align 8 dereferenceable(8) [[TMP66]])
668 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP65]], [[CALL]]
669 // CHECK1-NEXT: ret i32 [[ADD]]
670 //
671 //
672 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN1S3fooEv_l27
673 // CHECK1-SAME: (%struct.S* noundef [[THIS:%.*]], %class.anon.0* noundef nonnull align 8 dereferenceable(8) [[L:%.*]]) #[[ATTR1]] {
674 // CHECK1-NEXT: entry:
675 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
676 // CHECK1-NEXT: [[L_ADDR:%.*]] = alloca %class.anon.0*, align 8
677 // CHECK1-NEXT: [[TMP:%.*]] = alloca %class.anon.0*, align 8
678 // CHECK1-NEXT: [[L1:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8
679 // CHECK1-NEXT: [[_TMP2:%.*]] = alloca %class.anon.0*, align 8
680 // CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
681 // CHECK1-NEXT: store %class.anon.0* [[L]], %class.anon.0** [[L_ADDR]], align 8
682 // CHECK1-NEXT: [[TMP0:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
683 // CHECK1-NEXT: [[TMP1:%.*]] = load %class.anon.0*, %class.anon.0** [[L_ADDR]], align 8
684 // CHECK1-NEXT: store %class.anon.0* [[TMP1]], %class.anon.0** [[TMP]], align 8
685 // CHECK1-NEXT: [[TMP2:%.*]] = load %class.anon.0*, %class.anon.0** [[TMP]], align 8
686 // CHECK1-NEXT: [[TMP3:%.*]] = bitcast %class.anon.0* [[L1]] to i8*
687 // CHECK1-NEXT: [[TMP4:%.*]] = bitcast %class.anon.0* [[TMP2]] to i8*
688 // CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP3]], i8* align 8 [[TMP4]], i64 8, i1 false)
689 // CHECK1-NEXT: store %class.anon.0* [[L1]], %class.anon.0** [[_TMP2]], align 8
690 // CHECK1-NEXT: [[TMP5:%.*]] = load %class.anon.0*, %class.anon.0** [[_TMP2]], align 8
691 // CHECK1-NEXT: [[CALL:%.*]] = call noundef signext i32 @_ZZN1S3fooEvENKUlvE_clEv(%class.anon.0* noundef nonnull align 8 dereferenceable(8) [[TMP5]])
692 // CHECK1-NEXT: ret void
693 //
694 //
695 // CHECK1-LABEL: define {{[^@]+}}@_ZZN1S3fooEvENKUlvE_clEv
696 // CHECK1-SAME: (%class.anon.0* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]]) #[[ATTR3]] comdat align 2 {
697 // CHECK1-NEXT: entry:
698 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %class.anon.0*, align 8
699 // CHECK1-NEXT: store %class.anon.0* [[THIS]], %class.anon.0** [[THIS_ADDR]], align 8
700 // CHECK1-NEXT: [[THIS1:%.*]] = load %class.anon.0*, %class.anon.0** [[THIS_ADDR]], align 8
701 // CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON_0:%.*]], %class.anon.0* [[THIS1]], i32 0, i32 0
702 // CHECK1-NEXT: [[TMP1:%.*]] = load %struct.S*, %struct.S** [[TMP0]], align 8
703 // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[TMP1]], i32 0, i32 0
704 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[A]], align 4
705 // CHECK1-NEXT: ret i32 [[TMP2]]
706 //
707 //
708 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN1S3fooEv_l29
709 // CHECK1-SAME: (%struct.S* noundef [[THIS:%.*]], %class.anon.0* noundef nonnull align 8 dereferenceable(8) [[L:%.*]]) #[[ATTR5]] {
710 // CHECK1-NEXT: entry:
711 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
712 // CHECK1-NEXT: [[L_ADDR:%.*]] = alloca %class.anon.0*, align 8
713 // CHECK1-NEXT: [[TMP:%.*]] = alloca %class.anon.0*, align 8
714 // CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
715 // CHECK1-NEXT: store %class.anon.0* [[L]], %class.anon.0** [[L_ADDR]], align 8
716 // CHECK1-NEXT: [[TMP0:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
717 // CHECK1-NEXT: [[TMP1:%.*]] = load %class.anon.0*, %class.anon.0** [[L_ADDR]], align 8
718 // CHECK1-NEXT: store %class.anon.0* [[TMP1]], %class.anon.0** [[TMP]], align 8
719 // CHECK1-NEXT: [[TMP2:%.*]] = load %class.anon.0*, %class.anon.0** [[TMP]], align 8
720 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S*, %class.anon.0*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.S* [[TMP0]], %class.anon.0* [[TMP2]])
721 // CHECK1-NEXT: ret void
722 //
723 //
724 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..5
725 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S* noundef [[THIS:%.*]], %class.anon.0* noundef nonnull align 8 dereferenceable(8) [[L:%.*]]) #[[ATTR1]] {
726 // CHECK1-NEXT: entry:
727 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
728 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
729 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
730 // CHECK1-NEXT: [[L_ADDR:%.*]] = alloca %class.anon.0*, align 8
731 // CHECK1-NEXT: [[TMP:%.*]] = alloca %class.anon.0*, align 8
732 // CHECK1-NEXT: [[L1:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8
733 // CHECK1-NEXT: [[_TMP2:%.*]] = alloca %class.anon.0*, align 8
734 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
735 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
736 // CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
737 // CHECK1-NEXT: store %class.anon.0* [[L]], %class.anon.0** [[L_ADDR]], align 8
738 // CHECK1-NEXT: [[TMP0:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
739 // CHECK1-NEXT: [[TMP1:%.*]] = load %class.anon.0*, %class.anon.0** [[L_ADDR]], align 8
740 // CHECK1-NEXT: store %class.anon.0* [[TMP1]], %class.anon.0** [[TMP]], align 8
741 // CHECK1-NEXT: [[TMP2:%.*]] = load %class.anon.0*, %class.anon.0** [[TMP]], align 8
742 // CHECK1-NEXT: [[TMP3:%.*]] = bitcast %class.anon.0* [[L1]] to i8*
743 // CHECK1-NEXT: [[TMP4:%.*]] = bitcast %class.anon.0* [[TMP2]] to i8*
744 // CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP3]], i8* align 8 [[TMP4]], i64 8, i1 false)
745 // CHECK1-NEXT: store %class.anon.0* [[L1]], %class.anon.0** [[_TMP2]], align 8
746 // CHECK1-NEXT: [[TMP5:%.*]] = load %class.anon.0*, %class.anon.0** [[_TMP2]], align 8
747 // CHECK1-NEXT: [[CALL:%.*]] = call noundef signext i32 @_ZZN1S3fooEvENKUlvE_clEv(%class.anon.0* noundef nonnull align 8 dereferenceable(8) [[TMP5]])
748 // CHECK1-NEXT: ret void
749 //
750 //
751 // CHECK1-LABEL: define {{[^@]+}}@_Z3fooIZN1S3fooEvEUlvE_EiRKT_
752 // CHECK1-SAME: (%class.anon.0* noundef nonnull align 8 dereferenceable(8) [[T:%.*]]) #[[ATTR3]] comdat {
753 // CHECK1-NEXT: entry:
754 // CHECK1-NEXT: [[T_ADDR:%.*]] = alloca %class.anon.0*, align 8
755 // CHECK1-NEXT: [[TMP:%.*]] = alloca %class.anon.0*, align 8
756 // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x i8*], align 8
757 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x i8*], align 8
758 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x i8*], align 8
759 // CHECK1-NEXT: store %class.anon.0* [[T]], %class.anon.0** [[T_ADDR]], align 8
760 // CHECK1-NEXT: [[TMP0:%.*]] = load %class.anon.0*, %class.anon.0** [[T_ADDR]], align 8
761 // CHECK1-NEXT: store %class.anon.0* [[TMP0]], %class.anon.0** [[TMP]], align 8
762 // CHECK1-NEXT: [[TMP1:%.*]] = load %class.anon.0*, %class.anon.0** [[TMP]], align 8
763 // CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[CLASS_ANON_0:%.*]], %class.anon.0* [[TMP1]], i32 0, i32 0
764 // CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[TMP1]], i32 0, i32 0
765 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
766 // CHECK1-NEXT: [[TMP5:%.*]] = bitcast i8** [[TMP4]] to %class.anon.0**
767 // CHECK1-NEXT: store %class.anon.0* [[TMP1]], %class.anon.0** [[TMP5]], align 8
768 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
769 // CHECK1-NEXT: [[TMP7:%.*]] = bitcast i8** [[TMP6]] to %class.anon.0**
770 // CHECK1-NEXT: store %class.anon.0* [[TMP1]], %class.anon.0** [[TMP7]], align 8
771 // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
772 // CHECK1-NEXT: store i8* null, i8** [[TMP8]], align 8
773 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
774 // CHECK1-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to %struct.S***
775 // CHECK1-NEXT: store %struct.S** [[TMP2]], %struct.S*** [[TMP10]], align 8
776 // CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
777 // CHECK1-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to %struct.S***
778 // CHECK1-NEXT: store %struct.S** [[TMP3]], %struct.S*** [[TMP12]], align 8
779 // CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
780 // CHECK1-NEXT: store i8* null, i8** [[TMP13]], align 8
781 // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
782 // CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
783 // CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
784 // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
785 // CHECK1-NEXT: store i32 1, i32* [[TMP16]], align 4
786 // CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
787 // CHECK1-NEXT: store i32 2, i32* [[TMP17]], align 4
788 // CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
789 // CHECK1-NEXT: store i8** [[TMP14]], i8*** [[TMP18]], align 8
790 // CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
791 // CHECK1-NEXT: store i8** [[TMP15]], i8*** [[TMP19]], align 8
792 // CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
793 // CHECK1-NEXT: store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.9, i32 0, i32 0), i64** [[TMP20]], align 8
794 // CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
795 // CHECK1-NEXT: store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.10, i32 0, i32 0), i64** [[TMP21]], align 8
796 // CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
797 // CHECK1-NEXT: store i8** null, i8*** [[TMP22]], align 8
798 // CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
799 // CHECK1-NEXT: store i8** null, i8*** [[TMP23]], align 8
800 // CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
801 // CHECK1-NEXT: store i64 0, i64* [[TMP24]], align 8
802 // CHECK1-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooIZN1S3fooEvEUlvE_EiRKT__l18.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
803 // CHECK1-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
804 // CHECK1-NEXT: br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
805 // CHECK1: omp_offload.failed:
806 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooIZN1S3fooEvEUlvE_EiRKT__l18(%class.anon.0* [[TMP1]]) #[[ATTR4]]
807 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
808 // CHECK1: omp_offload.cont:
809 // CHECK1-NEXT: ret i32 0
810 //
811 //
812 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooIZN1S3fooEvEUlvE_EiRKT__l18
813 // CHECK1-SAME: (%class.anon.0* noundef nonnull align 8 dereferenceable(8) [[T:%.*]]) #[[ATTR5]] {
814 // CHECK1-NEXT: entry:
815 // CHECK1-NEXT: [[T_ADDR:%.*]] = alloca %class.anon.0*, align 8
816 // CHECK1-NEXT: [[TMP:%.*]] = alloca %class.anon.0*, align 8
817 // CHECK1-NEXT: store %class.anon.0* [[T]], %class.anon.0** [[T_ADDR]], align 8
818 // CHECK1-NEXT: [[TMP0:%.*]] = load %class.anon.0*, %class.anon.0** [[T_ADDR]], align 8
819 // CHECK1-NEXT: store %class.anon.0* [[TMP0]], %class.anon.0** [[TMP]], align 8
820 // CHECK1-NEXT: [[TMP1:%.*]] = load %class.anon.0*, %class.anon.0** [[TMP]], align 8
821 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %class.anon.0*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), %class.anon.0* [[TMP1]])
822 // CHECK1-NEXT: ret void
823 //
824 //
825 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..8
826 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %class.anon.0* noundef nonnull align 8 dereferenceable(8) [[T:%.*]]) #[[ATTR1]] {
827 // CHECK1-NEXT: entry:
828 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
829 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
830 // CHECK1-NEXT: [[T_ADDR:%.*]] = alloca %class.anon.0*, align 8
831 // CHECK1-NEXT: [[TMP:%.*]] = alloca %class.anon.0*, align 8
832 // CHECK1-NEXT: [[T1:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8
833 // CHECK1-NEXT: [[_TMP2:%.*]] = alloca %class.anon.0*, align 8
834 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
835 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
836 // CHECK1-NEXT: store %class.anon.0* [[T]], %class.anon.0** [[T_ADDR]], align 8
837 // CHECK1-NEXT: [[TMP0:%.*]] = load %class.anon.0*, %class.anon.0** [[T_ADDR]], align 8
838 // CHECK1-NEXT: store %class.anon.0* [[TMP0]], %class.anon.0** [[TMP]], align 8
839 // CHECK1-NEXT: [[TMP1:%.*]] = load %class.anon.0*, %class.anon.0** [[TMP]], align 8
840 // CHECK1-NEXT: [[TMP2:%.*]] = bitcast %class.anon.0* [[T1]] to i8*
841 // CHECK1-NEXT: [[TMP3:%.*]] = bitcast %class.anon.0* [[TMP1]] to i8*
842 // CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP2]], i8* align 8 [[TMP3]], i64 8, i1 false)
843 // CHECK1-NEXT: store %class.anon.0* [[T1]], %class.anon.0** [[_TMP2]], align 8
844 // CHECK1-NEXT: [[TMP4:%.*]] = load %class.anon.0*, %class.anon.0** [[_TMP2]], align 8
845 // CHECK1-NEXT: [[CALL:%.*]] = call noundef signext i32 @_ZZN1S3fooEvENKUlvE_clEv(%class.anon.0* noundef nonnull align 8 dereferenceable(8) [[TMP4]])
846 // CHECK1-NEXT: ret void
847 //
848 //
849 // CHECK1-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
850 // CHECK1-SAME: () #[[ATTR6:[0-9]+]] {
851 // CHECK1-NEXT: entry:
852 // CHECK1-NEXT: call void @__tgt_register_requires(i64 1)
853 // CHECK1-NEXT: ret void
854 //
855 //
856 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN1S3fooEv_l27
857 // CHECK2-SAME: (%struct.S* noundef [[THIS:%.*]], %class.anon* noundef nonnull align 8 dereferenceable(8) [[L:%.*]]) #[[ATTR0:[0-9]+]] {
858 // CHECK2-NEXT: entry:
859 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
860 // CHECK2-NEXT: [[L_ADDR:%.*]] = alloca %class.anon*, align 8
861 // CHECK2-NEXT: [[TMP:%.*]] = alloca %class.anon*, align 8
862 // CHECK2-NEXT: [[L1:%.*]] = alloca [[CLASS_ANON:%.*]], align 8
863 // CHECK2-NEXT: [[_TMP2:%.*]] = alloca %class.anon*, align 8
864 // CHECK2-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
865 // CHECK2-NEXT: store %class.anon* [[L]], %class.anon** [[L_ADDR]], align 8
866 // CHECK2-NEXT: [[TMP0:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
867 // CHECK2-NEXT: [[TMP1:%.*]] = load %class.anon*, %class.anon** [[L_ADDR]], align 8
868 // CHECK2-NEXT: store %class.anon* [[TMP1]], %class.anon** [[TMP]], align 8
869 // CHECK2-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1:[0-9]+]], i8 1, i1 true, i1 true)
870 // CHECK2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1
871 // CHECK2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
872 // CHECK2: user_code.entry:
873 // CHECK2-NEXT: [[TMP3:%.*]] = load %class.anon*, %class.anon** [[TMP]], align 8
874 // CHECK2-NEXT: [[TMP4:%.*]] = bitcast %class.anon* [[L1]] to i8*
875 // CHECK2-NEXT: [[TMP5:%.*]] = bitcast %class.anon* [[TMP3]] to i8*
876 // CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP4]], i8* align 8 [[TMP5]], i64 8, i1 false)
877 // CHECK2-NEXT: store %class.anon* [[L1]], %class.anon** [[_TMP2]], align 8
878 // CHECK2-NEXT: [[TMP6:%.*]] = load %class.anon*, %class.anon** [[_TMP2]], align 8
879 // CHECK2-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP6]], i32 0, i32 0
880 // CHECK2-NEXT: store %struct.S* [[TMP0]], %struct.S** [[TMP7]], align 8
881 // CHECK2-NEXT: [[TMP8:%.*]] = load %class.anon*, %class.anon** [[_TMP2]], align 8
882 // CHECK2-NEXT: [[CALL:%.*]] = call noundef i32 @_ZZN1S3fooEvENKUlvE_clEv(%class.anon* noundef nonnull align 8 dereferenceable(8) [[TMP8]]) #[[ATTR7:[0-9]+]]
883 // CHECK2-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
884 // CHECK2-NEXT: ret void
885 // CHECK2: worker.exit:
886 // CHECK2-NEXT: ret void
887 //
888 //
889 // CHECK2-LABEL: define {{[^@]+}}@_ZZN1S3fooEvENKUlvE_clEv
890 // CHECK2-SAME: (%class.anon* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]]) #[[ATTR2:[0-9]+]] comdat align 2 {
891 // CHECK2-NEXT: entry:
892 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %class.anon*, align 8
893 // CHECK2-NEXT: store %class.anon* [[THIS]], %class.anon** [[THIS_ADDR]], align 8
894 // CHECK2-NEXT: [[THIS1:%.*]] = load %class.anon*, %class.anon** [[THIS_ADDR]], align 8
895 // CHECK2-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON:%.*]], %class.anon* [[THIS1]], i32 0, i32 0
896 // CHECK2-NEXT: [[TMP1:%.*]] = load %struct.S*, %struct.S** [[TMP0]], align 8
897 // CHECK2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[TMP1]], i32 0, i32 0
898 // CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[A]], align 4
899 // CHECK2-NEXT: ret i32 [[TMP2]]
900 //
901 //
902 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN1S3fooEv_l29
903 // CHECK2-SAME: (%struct.S* noundef [[THIS:%.*]], %class.anon* noundef nonnull align 8 dereferenceable(8) [[L:%.*]]) #[[ATTR3:[0-9]+]] {
904 // CHECK2-NEXT: entry:
905 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
906 // CHECK2-NEXT: [[L_ADDR:%.*]] = alloca %class.anon*, align 8
907 // CHECK2-NEXT: [[TMP:%.*]] = alloca %class.anon*, align 8
908 // CHECK2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [2 x i8*], align 8
909 // CHECK2-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
910 // CHECK2-NEXT: store %class.anon* [[L]], %class.anon** [[L_ADDR]], align 8
911 // CHECK2-NEXT: [[TMP0:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
912 // CHECK2-NEXT: [[TMP1:%.*]] = load %class.anon*, %class.anon** [[L_ADDR]], align 8
913 // CHECK2-NEXT: store %class.anon* [[TMP1]], %class.anon** [[TMP]], align 8
914 // CHECK2-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 2, i1 false, i1 true)
915 // CHECK2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1
916 // CHECK2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
917 // CHECK2: user_code.entry:
918 // CHECK2-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
919 // CHECK2-NEXT: [[TMP4:%.*]] = load %class.anon*, %class.anon** [[TMP]], align 8
920 // CHECK2-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
921 // CHECK2-NEXT: [[TMP6:%.*]] = bitcast %struct.S* [[TMP0]] to i8*
922 // CHECK2-NEXT: store i8* [[TMP6]], i8** [[TMP5]], align 8
923 // CHECK2-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
924 // CHECK2-NEXT: [[TMP8:%.*]] = bitcast %class.anon* [[TMP4]] to i8*
925 // CHECK2-NEXT: store i8* [[TMP8]], i8** [[TMP7]], align 8
926 // CHECK2-NEXT: [[TMP9:%.*]] = bitcast [2 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
927 // CHECK2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, %struct.S*, %class.anon*)* @__omp_outlined__ to i8*), i8* null, i8** [[TMP9]], i64 2)
928 // CHECK2-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 2, i1 true)
929 // CHECK2-NEXT: ret void
930 // CHECK2: worker.exit:
931 // CHECK2-NEXT: ret void
932 //
933 //
934 // CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__
935 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S* noundef [[THIS:%.*]], %class.anon* noundef nonnull align 8 dereferenceable(8) [[L:%.*]]) #[[ATTR4:[0-9]+]] {
936 // CHECK2-NEXT: entry:
937 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
938 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
939 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
940 // CHECK2-NEXT: [[L_ADDR:%.*]] = alloca %class.anon*, align 8
941 // CHECK2-NEXT: [[TMP:%.*]] = alloca %class.anon*, align 8
942 // CHECK2-NEXT: [[L1:%.*]] = alloca [[CLASS_ANON:%.*]], align 8
943 // CHECK2-NEXT: [[_TMP2:%.*]] = alloca %class.anon*, align 8
944 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
945 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
946 // CHECK2-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
947 // CHECK2-NEXT: store %class.anon* [[L]], %class.anon** [[L_ADDR]], align 8
948 // CHECK2-NEXT: [[TMP0:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
949 // CHECK2-NEXT: [[TMP1:%.*]] = load %class.anon*, %class.anon** [[L_ADDR]], align 8
950 // CHECK2-NEXT: store %class.anon* [[TMP1]], %class.anon** [[TMP]], align 8
951 // CHECK2-NEXT: [[TMP2:%.*]] = load %class.anon*, %class.anon** [[TMP]], align 8
952 // CHECK2-NEXT: [[TMP3:%.*]] = bitcast %class.anon* [[L1]] to i8*
953 // CHECK2-NEXT: [[TMP4:%.*]] = bitcast %class.anon* [[TMP2]] to i8*
954 // CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP3]], i8* align 8 [[TMP4]], i64 8, i1 false)
955 // CHECK2-NEXT: store %class.anon* [[L1]], %class.anon** [[_TMP2]], align 8
956 // CHECK2-NEXT: [[TMP5:%.*]] = load %class.anon*, %class.anon** [[_TMP2]], align 8
957 // CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP5]], i32 0, i32 0
958 // CHECK2-NEXT: store %struct.S* [[TMP0]], %struct.S** [[TMP6]], align 8
959 // CHECK2-NEXT: [[TMP7:%.*]] = load %class.anon*, %class.anon** [[_TMP2]], align 8
960 // CHECK2-NEXT: [[CALL:%.*]] = call noundef i32 @_ZZN1S3fooEvENKUlvE_clEv(%class.anon* noundef nonnull align 8 dereferenceable(8) [[TMP7]]) #[[ATTR7]]
961 // CHECK2-NEXT: ret void
962 //
963 //
964 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l41
965 // CHECK2-SAME: (i64 noundef [[ARGC:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[B:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[C:%.*]], i32* noundef [[D:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], %class.anon.0* noundef nonnull align 8 dereferenceable(40) [[L:%.*]]) #[[ATTR0]] {
966 // CHECK2-NEXT: entry:
967 // CHECK2-NEXT: [[ARGC_ADDR:%.*]] = alloca i64, align 8
968 // CHECK2-NEXT: [[B_ADDR:%.*]] = alloca i32*, align 8
969 // CHECK2-NEXT: [[C_ADDR:%.*]] = alloca i32*, align 8
970 // CHECK2-NEXT: [[D_ADDR:%.*]] = alloca i32*, align 8
971 // CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
972 // CHECK2-NEXT: [[L_ADDR:%.*]] = alloca %class.anon.0*, align 8
973 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32*, align 8
974 // CHECK2-NEXT: [[_TMP1:%.*]] = alloca i32*, align 8
975 // CHECK2-NEXT: [[_TMP2:%.*]] = alloca %class.anon.0*, align 8
976 // CHECK2-NEXT: [[L3:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8
977 // CHECK2-NEXT: [[_TMP4:%.*]] = alloca %class.anon.0*, align 8
978 // CHECK2-NEXT: [[B5:%.*]] = alloca i32, align 4
979 // CHECK2-NEXT: [[_TMP6:%.*]] = alloca i32*, align 8
980 // CHECK2-NEXT: [[C7:%.*]] = alloca i32, align 4
981 // CHECK2-NEXT: [[_TMP8:%.*]] = alloca i32*, align 8
982 // CHECK2-NEXT: store i64 [[ARGC]], i64* [[ARGC_ADDR]], align 8
983 // CHECK2-NEXT: store i32* [[B]], i32** [[B_ADDR]], align 8
984 // CHECK2-NEXT: store i32* [[C]], i32** [[C_ADDR]], align 8
985 // CHECK2-NEXT: store i32* [[D]], i32** [[D_ADDR]], align 8
986 // CHECK2-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
987 // CHECK2-NEXT: store %class.anon.0* [[L]], %class.anon.0** [[L_ADDR]], align 8
988 // CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[ARGC_ADDR]] to i32*
989 // CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[B_ADDR]], align 8
990 // CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[C_ADDR]], align 8
991 // CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[A_ADDR]], align 8
992 // CHECK2-NEXT: [[TMP3:%.*]] = load %class.anon.0*, %class.anon.0** [[L_ADDR]], align 8
993 // CHECK2-NEXT: store i32* [[TMP0]], i32** [[TMP]], align 8
994 // CHECK2-NEXT: store i32* [[TMP1]], i32** [[_TMP1]], align 8
995 // CHECK2-NEXT: store %class.anon.0* [[TMP3]], %class.anon.0** [[_TMP2]], align 8
996 // CHECK2-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 true, i1 true)
997 // CHECK2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP4]], -1
998 // CHECK2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
999 // CHECK2: user_code.entry:
1000 // CHECK2-NEXT: [[TMP5:%.*]] = load %class.anon.0*, %class.anon.0** [[_TMP2]], align 8
1001 // CHECK2-NEXT: [[TMP6:%.*]] = bitcast %class.anon.0* [[L3]] to i8*
1002 // CHECK2-NEXT: [[TMP7:%.*]] = bitcast %class.anon.0* [[TMP5]] to i8*
1003 // CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP6]], i8* align 8 [[TMP7]], i64 40, i1 false)
1004 // CHECK2-NEXT: store %class.anon.0* [[L3]], %class.anon.0** [[_TMP4]], align 8
1005 // CHECK2-NEXT: [[TMP8:%.*]] = load i32*, i32** [[TMP]], align 8
1006 // CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
1007 // CHECK2-NEXT: store i32 [[TMP9]], i32* [[B5]], align 4
1008 // CHECK2-NEXT: store i32* [[B5]], i32** [[_TMP6]], align 8
1009 // CHECK2-NEXT: [[TMP10:%.*]] = load i32*, i32** [[_TMP1]], align 8
1010 // CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
1011 // CHECK2-NEXT: store i32 [[TMP11]], i32* [[C7]], align 4
1012 // CHECK2-NEXT: store i32* [[C7]], i32** [[_TMP8]], align 8
1013 // CHECK2-NEXT: [[TMP12:%.*]] = load %class.anon.0*, %class.anon.0** [[_TMP4]], align 8
1014 // CHECK2-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[TMP12]], i32 0, i32 0
1015 // CHECK2-NEXT: store i32* [[CONV]], i32** [[TMP13]], align 8
1016 // CHECK2-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[TMP12]], i32 0, i32 1
1017 // CHECK2-NEXT: [[TMP15:%.*]] = load i32*, i32** [[_TMP6]], align 8
1018 // CHECK2-NEXT: store i32* [[TMP15]], i32** [[TMP14]], align 8
1019 // CHECK2-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[TMP12]], i32 0, i32 2
1020 // CHECK2-NEXT: [[TMP17:%.*]] = load i32*, i32** [[_TMP8]], align 8
1021 // CHECK2-NEXT: store i32* [[TMP17]], i32** [[TMP16]], align 8
1022 // CHECK2-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[TMP12]], i32 0, i32 3
1023 // CHECK2-NEXT: store i32** [[D_ADDR]], i32*** [[TMP18]], align 8
1024 // CHECK2-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[TMP12]], i32 0, i32 4
1025 // CHECK2-NEXT: store i32* [[TMP2]], i32** [[TMP19]], align 8
1026 // CHECK2-NEXT: [[TMP20:%.*]] = load %class.anon.0*, %class.anon.0** [[_TMP4]], align 8
1027 // CHECK2-NEXT: [[CALL:%.*]] = call noundef i64 @"_ZZ4mainENK3$_0clEv"(%class.anon.0* noundef nonnull align 8 dereferenceable(40) [[TMP20]]) #[[ATTR7]]
1028 // CHECK2-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
1029 // CHECK2-NEXT: ret void
1030 // CHECK2: worker.exit:
1031 // CHECK2-NEXT: ret void
1032 //
1033 //
1034 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l43
1035 // CHECK2-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[ARGC:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[B:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[C:%.*]], i32* noundef [[D:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], %class.anon.0* noundef nonnull align 8 dereferenceable(40) [[L:%.*]]) #[[ATTR3]] {
1036 // CHECK2-NEXT: entry:
1037 // CHECK2-NEXT: [[ARGC_ADDR:%.*]] = alloca i32*, align 8
1038 // CHECK2-NEXT: [[B_ADDR:%.*]] = alloca i32*, align 8
1039 // CHECK2-NEXT: [[C_ADDR:%.*]] = alloca i32*, align 8
1040 // CHECK2-NEXT: [[D_ADDR:%.*]] = alloca i32*, align 8
1041 // CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
1042 // CHECK2-NEXT: [[L_ADDR:%.*]] = alloca %class.anon.0*, align 8
1043 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32*, align 8
1044 // CHECK2-NEXT: [[_TMP1:%.*]] = alloca i32*, align 8
1045 // CHECK2-NEXT: [[_TMP2:%.*]] = alloca %class.anon.0*, align 8
1046 // CHECK2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [6 x i8*], align 8
1047 // CHECK2-NEXT: store i32* [[ARGC]], i32** [[ARGC_ADDR]], align 8
1048 // CHECK2-NEXT: store i32* [[B]], i32** [[B_ADDR]], align 8
1049 // CHECK2-NEXT: store i32* [[C]], i32** [[C_ADDR]], align 8
1050 // CHECK2-NEXT: store i32* [[D]], i32** [[D_ADDR]], align 8
1051 // CHECK2-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
1052 // CHECK2-NEXT: store %class.anon.0* [[L]], %class.anon.0** [[L_ADDR]], align 8
1053 // CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[ARGC_ADDR]], align 8
1054 // CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[B_ADDR]], align 8
1055 // CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[C_ADDR]], align 8
1056 // CHECK2-NEXT: [[TMP3:%.*]] = load i32*, i32** [[A_ADDR]], align 8
1057 // CHECK2-NEXT: [[TMP4:%.*]] = load %class.anon.0*, %class.anon.0** [[L_ADDR]], align 8
1058 // CHECK2-NEXT: store i32* [[TMP1]], i32** [[TMP]], align 8
1059 // CHECK2-NEXT: store i32* [[TMP2]], i32** [[_TMP1]], align 8
1060 // CHECK2-NEXT: store %class.anon.0* [[TMP4]], %class.anon.0** [[_TMP2]], align 8
1061 // CHECK2-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 2, i1 false, i1 true)
1062 // CHECK2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP5]], -1
1063 // CHECK2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
1064 // CHECK2: user_code.entry:
1065 // CHECK2-NEXT: [[TMP6:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
1066 // CHECK2-NEXT: [[TMP7:%.*]] = load i32*, i32** [[TMP]], align 8
1067 // CHECK2-NEXT: [[TMP8:%.*]] = load i32*, i32** [[_TMP1]], align 8
1068 // CHECK2-NEXT: [[TMP9:%.*]] = load i32*, i32** [[D_ADDR]], align 8
1069 // CHECK2-NEXT: [[TMP10:%.*]] = load %class.anon.0*, %class.anon.0** [[_TMP2]], align 8
1070 // CHECK2-NEXT: [[TMP11:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
1071 // CHECK2-NEXT: [[TMP12:%.*]] = bitcast i32* [[TMP0]] to i8*
1072 // CHECK2-NEXT: store i8* [[TMP12]], i8** [[TMP11]], align 8
1073 // CHECK2-NEXT: [[TMP13:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
1074 // CHECK2-NEXT: [[TMP14:%.*]] = bitcast i32* [[TMP7]] to i8*
1075 // CHECK2-NEXT: store i8* [[TMP14]], i8** [[TMP13]], align 8
1076 // CHECK2-NEXT: [[TMP15:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
1077 // CHECK2-NEXT: [[TMP16:%.*]] = bitcast i32* [[TMP8]] to i8*
1078 // CHECK2-NEXT: store i8* [[TMP16]], i8** [[TMP15]], align 8
1079 // CHECK2-NEXT: [[TMP17:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 3
1080 // CHECK2-NEXT: [[TMP18:%.*]] = bitcast i32* [[TMP9]] to i8*
1081 // CHECK2-NEXT: store i8* [[TMP18]], i8** [[TMP17]], align 8
1082 // CHECK2-NEXT: [[TMP19:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 4
1083 // CHECK2-NEXT: [[TMP20:%.*]] = bitcast i32* [[TMP3]] to i8*
1084 // CHECK2-NEXT: store i8* [[TMP20]], i8** [[TMP19]], align 8
1085 // CHECK2-NEXT: [[TMP21:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 5
1086 // CHECK2-NEXT: [[TMP22:%.*]] = bitcast %class.anon.0* [[TMP10]] to i8*
1087 // CHECK2-NEXT: store i8* [[TMP22]], i8** [[TMP21]], align 8
1088 // CHECK2-NEXT: [[TMP23:%.*]] = bitcast [6 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
1089 // CHECK2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32*, i32*, i32*, i32*, i32*, %class.anon.0*)* @__omp_outlined__1 to i8*), i8* null, i8** [[TMP23]], i64 6)
1090 // CHECK2-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 2, i1 true)
1091 // CHECK2-NEXT: ret void
1092 // CHECK2: worker.exit:
1093 // CHECK2-NEXT: ret void
1094 //
1095 //
1096 // CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__1
1097 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[ARGC:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[B:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[C:%.*]], i32* noundef [[D:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], %class.anon.0* noundef nonnull align 8 dereferenceable(40) [[L:%.*]]) #[[ATTR4]] {
1098 // CHECK2-NEXT: entry:
1099 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1100 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1101 // CHECK2-NEXT: [[ARGC_ADDR:%.*]] = alloca i32*, align 8
1102 // CHECK2-NEXT: [[B_ADDR:%.*]] = alloca i32*, align 8
1103 // CHECK2-NEXT: [[C_ADDR:%.*]] = alloca i32*, align 8
1104 // CHECK2-NEXT: [[D_ADDR:%.*]] = alloca i32*, align 8
1105 // CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
1106 // CHECK2-NEXT: [[L_ADDR:%.*]] = alloca %class.anon.0*, align 8
1107 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32*, align 8
1108 // CHECK2-NEXT: [[_TMP1:%.*]] = alloca i32*, align 8
1109 // CHECK2-NEXT: [[_TMP2:%.*]] = alloca %class.anon.0*, align 8
1110 // CHECK2-NEXT: [[L3:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8
1111 // CHECK2-NEXT: [[_TMP4:%.*]] = alloca %class.anon.0*, align 8
1112 // CHECK2-NEXT: [[ARGC5:%.*]] = alloca i32, align 4
1113 // CHECK2-NEXT: [[B6:%.*]] = alloca i32, align 4
1114 // CHECK2-NEXT: [[_TMP7:%.*]] = alloca i32*, align 8
1115 // CHECK2-NEXT: [[C8:%.*]] = alloca i32, align 4
1116 // CHECK2-NEXT: [[_TMP9:%.*]] = alloca i32*, align 8
1117 // CHECK2-NEXT: [[A10:%.*]] = alloca i32, align 4
1118 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1119 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1120 // CHECK2-NEXT: store i32* [[ARGC]], i32** [[ARGC_ADDR]], align 8
1121 // CHECK2-NEXT: store i32* [[B]], i32** [[B_ADDR]], align 8
1122 // CHECK2-NEXT: store i32* [[C]], i32** [[C_ADDR]], align 8
1123 // CHECK2-NEXT: store i32* [[D]], i32** [[D_ADDR]], align 8
1124 // CHECK2-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
1125 // CHECK2-NEXT: store %class.anon.0* [[L]], %class.anon.0** [[L_ADDR]], align 8
1126 // CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[ARGC_ADDR]], align 8
1127 // CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[B_ADDR]], align 8
1128 // CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[C_ADDR]], align 8
1129 // CHECK2-NEXT: [[TMP3:%.*]] = load i32*, i32** [[A_ADDR]], align 8
1130 // CHECK2-NEXT: [[TMP4:%.*]] = load %class.anon.0*, %class.anon.0** [[L_ADDR]], align 8
1131 // CHECK2-NEXT: store i32* [[TMP1]], i32** [[TMP]], align 8
1132 // CHECK2-NEXT: store i32* [[TMP2]], i32** [[_TMP1]], align 8
1133 // CHECK2-NEXT: store %class.anon.0* [[TMP4]], %class.anon.0** [[_TMP2]], align 8
1134 // CHECK2-NEXT: [[TMP5:%.*]] = load %class.anon.0*, %class.anon.0** [[_TMP2]], align 8
1135 // CHECK2-NEXT: [[TMP6:%.*]] = bitcast %class.anon.0* [[L3]] to i8*
1136 // CHECK2-NEXT: [[TMP7:%.*]] = bitcast %class.anon.0* [[TMP5]] to i8*
1137 // CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP6]], i8* align 8 [[TMP7]], i64 40, i1 false)
1138 // CHECK2-NEXT: store %class.anon.0* [[L3]], %class.anon.0** [[_TMP4]], align 8
1139 // CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP0]], align 4
1140 // CHECK2-NEXT: store i32 [[TMP8]], i32* [[ARGC5]], align 4
1141 // CHECK2-NEXT: [[TMP9:%.*]] = load i32*, i32** [[TMP]], align 8
1142 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
1143 // CHECK2-NEXT: store i32 [[TMP10]], i32* [[B6]], align 4
1144 // CHECK2-NEXT: store i32* [[B6]], i32** [[_TMP7]], align 8
1145 // CHECK2-NEXT: [[TMP11:%.*]] = load i32*, i32** [[_TMP1]], align 8
1146 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
1147 // CHECK2-NEXT: store i32 [[TMP12]], i32* [[C8]], align 4
1148 // CHECK2-NEXT: store i32* [[C8]], i32** [[_TMP9]], align 8
1149 // CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP3]], align 4
1150 // CHECK2-NEXT: store i32 [[TMP13]], i32* [[A10]], align 4
1151 // CHECK2-NEXT: [[TMP14:%.*]] = load %class.anon.0*, %class.anon.0** [[_TMP4]], align 8
1152 // CHECK2-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[TMP14]], i32 0, i32 0
1153 // CHECK2-NEXT: store i32* [[ARGC5]], i32** [[TMP15]], align 8
1154 // CHECK2-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[TMP14]], i32 0, i32 1
1155 // CHECK2-NEXT: [[TMP17:%.*]] = load i32*, i32** [[_TMP7]], align 8
1156 // CHECK2-NEXT: store i32* [[TMP17]], i32** [[TMP16]], align 8
1157 // CHECK2-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[TMP14]], i32 0, i32 2
1158 // CHECK2-NEXT: [[TMP19:%.*]] = load i32*, i32** [[_TMP9]], align 8
1159 // CHECK2-NEXT: store i32* [[TMP19]], i32** [[TMP18]], align 8
1160 // CHECK2-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[TMP14]], i32 0, i32 3
1161 // CHECK2-NEXT: store i32** [[D_ADDR]], i32*** [[TMP20]], align 8
1162 // CHECK2-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[TMP14]], i32 0, i32 4
1163 // CHECK2-NEXT: store i32* [[A10]], i32** [[TMP21]], align 8
1164 // CHECK2-NEXT: [[TMP22:%.*]] = load %class.anon.0*, %class.anon.0** [[_TMP4]], align 8
1165 // CHECK2-NEXT: [[CALL:%.*]] = call noundef i64 @"_ZZ4mainENK3$_0clEv"(%class.anon.0* noundef nonnull align 8 dereferenceable(40) [[TMP22]]) #[[ATTR7]]
1166 // CHECK2-NEXT: ret void
1167 //
1168 //
1169 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooIZN1S3fooEvEUlvE_EiRKT__l18
1170 // CHECK2-SAME: (%class.anon* noundef nonnull align 8 dereferenceable(8) [[T:%.*]]) #[[ATTR3]] {
1171 // CHECK2-NEXT: entry:
1172 // CHECK2-NEXT: [[T_ADDR:%.*]] = alloca %class.anon*, align 8
1173 // CHECK2-NEXT: [[TMP:%.*]] = alloca %class.anon*, align 8
1174 // CHECK2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x i8*], align 8
1175 // CHECK2-NEXT: store %class.anon* [[T]], %class.anon** [[T_ADDR]], align 8
1176 // CHECK2-NEXT: [[TMP0:%.*]] = load %class.anon*, %class.anon** [[T_ADDR]], align 8
1177 // CHECK2-NEXT: store %class.anon* [[TMP0]], %class.anon** [[TMP]], align 8
1178 // CHECK2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 2, i1 false, i1 true)
1179 // CHECK2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
1180 // CHECK2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
1181 // CHECK2: user_code.entry:
1182 // CHECK2-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
1183 // CHECK2-NEXT: [[TMP3:%.*]] = load %class.anon*, %class.anon** [[TMP]], align 8
1184 // CHECK2-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
1185 // CHECK2-NEXT: [[TMP5:%.*]] = bitcast %class.anon* [[TMP3]] to i8*
1186 // CHECK2-NEXT: store i8* [[TMP5]], i8** [[TMP4]], align 8
1187 // CHECK2-NEXT: [[TMP6:%.*]] = bitcast [1 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
1188 // CHECK2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, %class.anon*)* @__omp_outlined__2 to i8*), i8* null, i8** [[TMP6]], i64 1)
1189 // CHECK2-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 2, i1 true)
1190 // CHECK2-NEXT: ret void
1191 // CHECK2: worker.exit:
1192 // CHECK2-NEXT: ret void
1193 //
1194 //
1195 // CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__2
1196 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %class.anon* noundef nonnull align 8 dereferenceable(8) [[T:%.*]]) #[[ATTR4]] {
1197 // CHECK2-NEXT: entry:
1198 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1199 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1200 // CHECK2-NEXT: [[T_ADDR:%.*]] = alloca %class.anon*, align 8
1201 // CHECK2-NEXT: [[TMP:%.*]] = alloca %class.anon*, align 8
1202 // CHECK2-NEXT: [[T1:%.*]] = alloca [[CLASS_ANON:%.*]], align 8
1203 // CHECK2-NEXT: [[_TMP2:%.*]] = alloca %class.anon*, align 8
1204 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1205 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1206 // CHECK2-NEXT: store %class.anon* [[T]], %class.anon** [[T_ADDR]], align 8
1207 // CHECK2-NEXT: [[TMP0:%.*]] = load %class.anon*, %class.anon** [[T_ADDR]], align 8
1208 // CHECK2-NEXT: store %class.anon* [[TMP0]], %class.anon** [[TMP]], align 8
1209 // CHECK2-NEXT: [[TMP1:%.*]] = load %class.anon*, %class.anon** [[TMP]], align 8
1210 // CHECK2-NEXT: [[TMP2:%.*]] = bitcast %class.anon* [[T1]] to i8*
1211 // CHECK2-NEXT: [[TMP3:%.*]] = bitcast %class.anon* [[TMP1]] to i8*
1212 // CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP2]], i8* align 8 [[TMP3]], i64 8, i1 false)
1213 // CHECK2-NEXT: store %class.anon* [[T1]], %class.anon** [[_TMP2]], align 8
1214 // CHECK2-NEXT: [[TMP4:%.*]] = load %class.anon*, %class.anon** [[_TMP2]], align 8
1215 // CHECK2-NEXT: [[TMP5:%.*]] = load %class.anon*, %class.anon** [[_TMP2]], align 8
1216 // CHECK2-NEXT: [[CALL:%.*]] = call noundef i32 @_ZZN1S3fooEvENKUlvE_clEv(%class.anon* noundef nonnull align 8 dereferenceable(8) [[TMP5]]) #[[ATTR7]]
1217 // CHECK2-NEXT: ret void
1218 //
1219 //
1220 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l41
1221 // CHECK3-SAME: (i64 noundef [[ARGC:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[B:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[C:%.*]], i32* noundef [[D:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], %class.anon* noundef nonnull align 8 dereferenceable(40) [[L:%.*]]) #[[ATTR0:[0-9]+]] {
1222 // CHECK3-NEXT: entry:
1223 // CHECK3-NEXT: [[ARGC_ADDR:%.*]] = alloca i64, align 8
1224 // CHECK3-NEXT: [[B_ADDR:%.*]] = alloca i32*, align 8
1225 // CHECK3-NEXT: [[C_ADDR:%.*]] = alloca i32*, align 8
1226 // CHECK3-NEXT: [[D_ADDR:%.*]] = alloca i32*, align 8
1227 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
1228 // CHECK3-NEXT: [[L_ADDR:%.*]] = alloca %class.anon*, align 8
1229 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32*, align 8
1230 // CHECK3-NEXT: [[_TMP1:%.*]] = alloca i32*, align 8
1231 // CHECK3-NEXT: [[_TMP2:%.*]] = alloca %class.anon*, align 8
1232 // CHECK3-NEXT: [[L3:%.*]] = alloca [[CLASS_ANON:%.*]], align 8
1233 // CHECK3-NEXT: [[_TMP4:%.*]] = alloca %class.anon*, align 8
1234 // CHECK3-NEXT: [[B5:%.*]] = alloca i32, align 4
1235 // CHECK3-NEXT: [[_TMP6:%.*]] = alloca i32*, align 8
1236 // CHECK3-NEXT: [[C7:%.*]] = alloca i32, align 4
1237 // CHECK3-NEXT: [[_TMP8:%.*]] = alloca i32*, align 8
1238 // CHECK3-NEXT: store i64 [[ARGC]], i64* [[ARGC_ADDR]], align 8
1239 // CHECK3-NEXT: store i32* [[B]], i32** [[B_ADDR]], align 8
1240 // CHECK3-NEXT: store i32* [[C]], i32** [[C_ADDR]], align 8
1241 // CHECK3-NEXT: store i32* [[D]], i32** [[D_ADDR]], align 8
1242 // CHECK3-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
1243 // CHECK3-NEXT: store %class.anon* [[L]], %class.anon** [[L_ADDR]], align 8
1244 // CHECK3-NEXT: [[CONV:%.*]] = bitcast i64* [[ARGC_ADDR]] to i32*
1245 // CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[B_ADDR]], align 8
1246 // CHECK3-NEXT: [[TMP1:%.*]] = load i32*, i32** [[C_ADDR]], align 8
1247 // CHECK3-NEXT: [[TMP2:%.*]] = load i32*, i32** [[A_ADDR]], align 8
1248 // CHECK3-NEXT: [[TMP3:%.*]] = load %class.anon*, %class.anon** [[L_ADDR]], align 8
1249 // CHECK3-NEXT: store i32* [[TMP0]], i32** [[TMP]], align 8
1250 // CHECK3-NEXT: store i32* [[TMP1]], i32** [[_TMP1]], align 8
1251 // CHECK3-NEXT: store %class.anon* [[TMP3]], %class.anon** [[_TMP2]], align 8
1252 // CHECK3-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1:[0-9]+]], i8 1, i1 true, i1 true)
1253 // CHECK3-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP4]], -1
1254 // CHECK3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
1255 // CHECK3: user_code.entry:
1256 // CHECK3-NEXT: [[TMP5:%.*]] = load %class.anon*, %class.anon** [[_TMP2]], align 8
1257 // CHECK3-NEXT: [[TMP6:%.*]] = bitcast %class.anon* [[L3]] to i8*
1258 // CHECK3-NEXT: [[TMP7:%.*]] = bitcast %class.anon* [[TMP5]] to i8*
1259 // CHECK3-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP6]], i8* align 8 [[TMP7]], i64 40, i1 false)
1260 // CHECK3-NEXT: store %class.anon* [[L3]], %class.anon** [[_TMP4]], align 8
1261 // CHECK3-NEXT: [[TMP8:%.*]] = load i32*, i32** [[TMP]], align 8
1262 // CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
1263 // CHECK3-NEXT: store i32 [[TMP9]], i32* [[B5]], align 4
1264 // CHECK3-NEXT: store i32* [[B5]], i32** [[_TMP6]], align 8
1265 // CHECK3-NEXT: [[TMP10:%.*]] = load i32*, i32** [[_TMP1]], align 8
1266 // CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
1267 // CHECK3-NEXT: store i32 [[TMP11]], i32* [[C7]], align 4
1268 // CHECK3-NEXT: store i32* [[C7]], i32** [[_TMP8]], align 8
1269 // CHECK3-NEXT: [[TMP12:%.*]] = load %class.anon*, %class.anon** [[_TMP4]], align 8
1270 // CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP12]], i32 0, i32 0
1271 // CHECK3-NEXT: store i32* [[CONV]], i32** [[TMP13]], align 8
1272 // CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP12]], i32 0, i32 1
1273 // CHECK3-NEXT: [[TMP15:%.*]] = load i32*, i32** [[_TMP6]], align 8
1274 // CHECK3-NEXT: store i32* [[TMP15]], i32** [[TMP14]], align 8
1275 // CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP12]], i32 0, i32 2
1276 // CHECK3-NEXT: [[TMP17:%.*]] = load i32*, i32** [[_TMP8]], align 8
1277 // CHECK3-NEXT: store i32* [[TMP17]], i32** [[TMP16]], align 8
1278 // CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP12]], i32 0, i32 3
1279 // CHECK3-NEXT: store i32** [[D_ADDR]], i32*** [[TMP18]], align 8
1280 // CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP12]], i32 0, i32 4
1281 // CHECK3-NEXT: store i32* [[TMP2]], i32** [[TMP19]], align 8
1282 // CHECK3-NEXT: [[TMP20:%.*]] = load %class.anon*, %class.anon** [[_TMP4]], align 8
1283 // CHECK3-NEXT: [[CALL:%.*]] = call noundef i64 @"_ZZ4mainENK3$_0clEv"(%class.anon* noundef nonnull align 8 dereferenceable(40) [[TMP20]]) #[[ATTR7:[0-9]+]]
1284 // CHECK3-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
1285 // CHECK3-NEXT: ret void
1286 // CHECK3: worker.exit:
1287 // CHECK3-NEXT: ret void
1288 //
1289 //
1290 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l43
1291 // CHECK3-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[ARGC:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[B:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[C:%.*]], i32* noundef [[D:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], %class.anon* noundef nonnull align 8 dereferenceable(40) [[L:%.*]]) #[[ATTR3:[0-9]+]] {
1292 // CHECK3-NEXT: entry:
1293 // CHECK3-NEXT: [[ARGC_ADDR:%.*]] = alloca i32*, align 8
1294 // CHECK3-NEXT: [[B_ADDR:%.*]] = alloca i32*, align 8
1295 // CHECK3-NEXT: [[C_ADDR:%.*]] = alloca i32*, align 8
1296 // CHECK3-NEXT: [[D_ADDR:%.*]] = alloca i32*, align 8
1297 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
1298 // CHECK3-NEXT: [[L_ADDR:%.*]] = alloca %class.anon*, align 8
1299 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32*, align 8
1300 // CHECK3-NEXT: [[_TMP1:%.*]] = alloca i32*, align 8
1301 // CHECK3-NEXT: [[_TMP2:%.*]] = alloca %class.anon*, align 8
1302 // CHECK3-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [6 x i8*], align 8
1303 // CHECK3-NEXT: store i32* [[ARGC]], i32** [[ARGC_ADDR]], align 8
1304 // CHECK3-NEXT: store i32* [[B]], i32** [[B_ADDR]], align 8
1305 // CHECK3-NEXT: store i32* [[C]], i32** [[C_ADDR]], align 8
1306 // CHECK3-NEXT: store i32* [[D]], i32** [[D_ADDR]], align 8
1307 // CHECK3-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
1308 // CHECK3-NEXT: store %class.anon* [[L]], %class.anon** [[L_ADDR]], align 8
1309 // CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[ARGC_ADDR]], align 8
1310 // CHECK3-NEXT: [[TMP1:%.*]] = load i32*, i32** [[B_ADDR]], align 8
1311 // CHECK3-NEXT: [[TMP2:%.*]] = load i32*, i32** [[C_ADDR]], align 8
1312 // CHECK3-NEXT: [[TMP3:%.*]] = load i32*, i32** [[A_ADDR]], align 8
1313 // CHECK3-NEXT: [[TMP4:%.*]] = load %class.anon*, %class.anon** [[L_ADDR]], align 8
1314 // CHECK3-NEXT: store i32* [[TMP1]], i32** [[TMP]], align 8
1315 // CHECK3-NEXT: store i32* [[TMP2]], i32** [[_TMP1]], align 8
1316 // CHECK3-NEXT: store %class.anon* [[TMP4]], %class.anon** [[_TMP2]], align 8
1317 // CHECK3-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 2, i1 false, i1 true)
1318 // CHECK3-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP5]], -1
1319 // CHECK3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
1320 // CHECK3: user_code.entry:
1321 // CHECK3-NEXT: [[TMP6:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
1322 // CHECK3-NEXT: [[TMP7:%.*]] = load i32*, i32** [[TMP]], align 8
1323 // CHECK3-NEXT: [[TMP8:%.*]] = load i32*, i32** [[_TMP1]], align 8
1324 // CHECK3-NEXT: [[TMP9:%.*]] = load i32*, i32** [[D_ADDR]], align 8
1325 // CHECK3-NEXT: [[TMP10:%.*]] = load %class.anon*, %class.anon** [[_TMP2]], align 8
1326 // CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
1327 // CHECK3-NEXT: [[TMP12:%.*]] = bitcast i32* [[TMP0]] to i8*
1328 // CHECK3-NEXT: store i8* [[TMP12]], i8** [[TMP11]], align 8
1329 // CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
1330 // CHECK3-NEXT: [[TMP14:%.*]] = bitcast i32* [[TMP7]] to i8*
1331 // CHECK3-NEXT: store i8* [[TMP14]], i8** [[TMP13]], align 8
1332 // CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
1333 // CHECK3-NEXT: [[TMP16:%.*]] = bitcast i32* [[TMP8]] to i8*
1334 // CHECK3-NEXT: store i8* [[TMP16]], i8** [[TMP15]], align 8
1335 // CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 3
1336 // CHECK3-NEXT: [[TMP18:%.*]] = bitcast i32* [[TMP9]] to i8*
1337 // CHECK3-NEXT: store i8* [[TMP18]], i8** [[TMP17]], align 8
1338 // CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 4
1339 // CHECK3-NEXT: [[TMP20:%.*]] = bitcast i32* [[TMP3]] to i8*
1340 // CHECK3-NEXT: store i8* [[TMP20]], i8** [[TMP19]], align 8
1341 // CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 5
1342 // CHECK3-NEXT: [[TMP22:%.*]] = bitcast %class.anon* [[TMP10]] to i8*
1343 // CHECK3-NEXT: store i8* [[TMP22]], i8** [[TMP21]], align 8
1344 // CHECK3-NEXT: [[TMP23:%.*]] = bitcast [6 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
1345 // CHECK3-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32*, i32*, i32*, i32*, i32*, %class.anon*)* @__omp_outlined__ to i8*), i8* null, i8** [[TMP23]], i64 6)
1346 // CHECK3-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 2, i1 true)
1347 // CHECK3-NEXT: ret void
1348 // CHECK3: worker.exit:
1349 // CHECK3-NEXT: ret void
1350 //
1351 //
1352 // CHECK3-LABEL: define {{[^@]+}}@__omp_outlined__
1353 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[ARGC:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[B:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[C:%.*]], i32* noundef [[D:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], %class.anon* noundef nonnull align 8 dereferenceable(40) [[L:%.*]]) #[[ATTR4:[0-9]+]] {
1354 // CHECK3-NEXT: entry:
1355 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1356 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1357 // CHECK3-NEXT: [[ARGC_ADDR:%.*]] = alloca i32*, align 8
1358 // CHECK3-NEXT: [[B_ADDR:%.*]] = alloca i32*, align 8
1359 // CHECK3-NEXT: [[C_ADDR:%.*]] = alloca i32*, align 8
1360 // CHECK3-NEXT: [[D_ADDR:%.*]] = alloca i32*, align 8
1361 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
1362 // CHECK3-NEXT: [[L_ADDR:%.*]] = alloca %class.anon*, align 8
1363 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32*, align 8
1364 // CHECK3-NEXT: [[_TMP1:%.*]] = alloca i32*, align 8
1365 // CHECK3-NEXT: [[_TMP2:%.*]] = alloca %class.anon*, align 8
1366 // CHECK3-NEXT: [[L3:%.*]] = alloca [[CLASS_ANON:%.*]], align 8
1367 // CHECK3-NEXT: [[_TMP4:%.*]] = alloca %class.anon*, align 8
1368 // CHECK3-NEXT: [[ARGC5:%.*]] = alloca i32, align 4
1369 // CHECK3-NEXT: [[B6:%.*]] = alloca i32, align 4
1370 // CHECK3-NEXT: [[_TMP7:%.*]] = alloca i32*, align 8
1371 // CHECK3-NEXT: [[C8:%.*]] = alloca i32, align 4
1372 // CHECK3-NEXT: [[_TMP9:%.*]] = alloca i32*, align 8
1373 // CHECK3-NEXT: [[A10:%.*]] = alloca i32, align 4
1374 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1375 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1376 // CHECK3-NEXT: store i32* [[ARGC]], i32** [[ARGC_ADDR]], align 8
1377 // CHECK3-NEXT: store i32* [[B]], i32** [[B_ADDR]], align 8
1378 // CHECK3-NEXT: store i32* [[C]], i32** [[C_ADDR]], align 8
1379 // CHECK3-NEXT: store i32* [[D]], i32** [[D_ADDR]], align 8
1380 // CHECK3-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
1381 // CHECK3-NEXT: store %class.anon* [[L]], %class.anon** [[L_ADDR]], align 8
1382 // CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[ARGC_ADDR]], align 8
1383 // CHECK3-NEXT: [[TMP1:%.*]] = load i32*, i32** [[B_ADDR]], align 8
1384 // CHECK3-NEXT: [[TMP2:%.*]] = load i32*, i32** [[C_ADDR]], align 8
1385 // CHECK3-NEXT: [[TMP3:%.*]] = load i32*, i32** [[A_ADDR]], align 8
1386 // CHECK3-NEXT: [[TMP4:%.*]] = load %class.anon*, %class.anon** [[L_ADDR]], align 8
1387 // CHECK3-NEXT: store i32* [[TMP1]], i32** [[TMP]], align 8
1388 // CHECK3-NEXT: store i32* [[TMP2]], i32** [[_TMP1]], align 8
1389 // CHECK3-NEXT: store %class.anon* [[TMP4]], %class.anon** [[_TMP2]], align 8
1390 // CHECK3-NEXT: [[TMP5:%.*]] = load %class.anon*, %class.anon** [[_TMP2]], align 8
1391 // CHECK3-NEXT: [[TMP6:%.*]] = bitcast %class.anon* [[L3]] to i8*
1392 // CHECK3-NEXT: [[TMP7:%.*]] = bitcast %class.anon* [[TMP5]] to i8*
1393 // CHECK3-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP6]], i8* align 8 [[TMP7]], i64 40, i1 false)
1394 // CHECK3-NEXT: store %class.anon* [[L3]], %class.anon** [[_TMP4]], align 8
1395 // CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP0]], align 4
1396 // CHECK3-NEXT: store i32 [[TMP8]], i32* [[ARGC5]], align 4
1397 // CHECK3-NEXT: [[TMP9:%.*]] = load i32*, i32** [[TMP]], align 8
1398 // CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
1399 // CHECK3-NEXT: store i32 [[TMP10]], i32* [[B6]], align 4
1400 // CHECK3-NEXT: store i32* [[B6]], i32** [[_TMP7]], align 8
1401 // CHECK3-NEXT: [[TMP11:%.*]] = load i32*, i32** [[_TMP1]], align 8
1402 // CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
1403 // CHECK3-NEXT: store i32 [[TMP12]], i32* [[C8]], align 4
1404 // CHECK3-NEXT: store i32* [[C8]], i32** [[_TMP9]], align 8
1405 // CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP3]], align 4
1406 // CHECK3-NEXT: store i32 [[TMP13]], i32* [[A10]], align 4
1407 // CHECK3-NEXT: [[TMP14:%.*]] = load %class.anon*, %class.anon** [[_TMP4]], align 8
1408 // CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP14]], i32 0, i32 0
1409 // CHECK3-NEXT: store i32* [[ARGC5]], i32** [[TMP15]], align 8
1410 // CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP14]], i32 0, i32 1
1411 // CHECK3-NEXT: [[TMP17:%.*]] = load i32*, i32** [[_TMP7]], align 8
1412 // CHECK3-NEXT: store i32* [[TMP17]], i32** [[TMP16]], align 8
1413 // CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP14]], i32 0, i32 2
1414 // CHECK3-NEXT: [[TMP19:%.*]] = load i32*, i32** [[_TMP9]], align 8
1415 // CHECK3-NEXT: store i32* [[TMP19]], i32** [[TMP18]], align 8
1416 // CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP14]], i32 0, i32 3
1417 // CHECK3-NEXT: store i32** [[D_ADDR]], i32*** [[TMP20]], align 8
1418 // CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP14]], i32 0, i32 4
1419 // CHECK3-NEXT: store i32* [[A10]], i32** [[TMP21]], align 8
1420 // CHECK3-NEXT: [[TMP22:%.*]] = load %class.anon*, %class.anon** [[_TMP4]], align 8
1421 // CHECK3-NEXT: [[CALL:%.*]] = call noundef i64 @"_ZZ4mainENK3$_0clEv"(%class.anon* noundef nonnull align 8 dereferenceable(40) [[TMP22]]) #[[ATTR7]]
1422 // CHECK3-NEXT: ret void
1423 //
1424 //
1425 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN1S3fooEv_l27
1426 // CHECK3-SAME: (%struct.S* noundef [[THIS:%.*]], %class.anon.0* noundef nonnull align 8 dereferenceable(8) [[L:%.*]]) #[[ATTR0]] {
1427 // CHECK3-NEXT: entry:
1428 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1429 // CHECK3-NEXT: [[L_ADDR:%.*]] = alloca %class.anon.0*, align 8
1430 // CHECK3-NEXT: [[TMP:%.*]] = alloca %class.anon.0*, align 8
1431 // CHECK3-NEXT: [[L1:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8
1432 // CHECK3-NEXT: [[_TMP2:%.*]] = alloca %class.anon.0*, align 8
1433 // CHECK3-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1434 // CHECK3-NEXT: store %class.anon.0* [[L]], %class.anon.0** [[L_ADDR]], align 8
1435 // CHECK3-NEXT: [[TMP0:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1436 // CHECK3-NEXT: [[TMP1:%.*]] = load %class.anon.0*, %class.anon.0** [[L_ADDR]], align 8
1437 // CHECK3-NEXT: store %class.anon.0* [[TMP1]], %class.anon.0** [[TMP]], align 8
1438 // CHECK3-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 true, i1 true)
1439 // CHECK3-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1
1440 // CHECK3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
1441 // CHECK3: user_code.entry:
1442 // CHECK3-NEXT: [[TMP3:%.*]] = load %class.anon.0*, %class.anon.0** [[TMP]], align 8
1443 // CHECK3-NEXT: [[TMP4:%.*]] = bitcast %class.anon.0* [[L1]] to i8*
1444 // CHECK3-NEXT: [[TMP5:%.*]] = bitcast %class.anon.0* [[TMP3]] to i8*
1445 // CHECK3-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP4]], i8* align 8 [[TMP5]], i64 8, i1 false)
1446 // CHECK3-NEXT: store %class.anon.0* [[L1]], %class.anon.0** [[_TMP2]], align 8
1447 // CHECK3-NEXT: [[TMP6:%.*]] = load %class.anon.0*, %class.anon.0** [[_TMP2]], align 8
1448 // CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[TMP6]], i32 0, i32 0
1449 // CHECK3-NEXT: store %struct.S* [[TMP0]], %struct.S** [[TMP7]], align 8
1450 // CHECK3-NEXT: [[TMP8:%.*]] = load %class.anon.0*, %class.anon.0** [[_TMP2]], align 8
1451 // CHECK3-NEXT: [[CALL:%.*]] = call noundef i32 @_ZZN1S3fooEvENKUlvE_clEv(%class.anon.0* noundef nonnull align 8 dereferenceable(8) [[TMP8]]) #[[ATTR7]]
1452 // CHECK3-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
1453 // CHECK3-NEXT: ret void
1454 // CHECK3: worker.exit:
1455 // CHECK3-NEXT: ret void
1456 //
1457 //
1458 // CHECK3-LABEL: define {{[^@]+}}@_ZZN1S3fooEvENKUlvE_clEv
1459 // CHECK3-SAME: (%class.anon.0* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]]) #[[ATTR2:[0-9]+]] comdat align 2 {
1460 // CHECK3-NEXT: entry:
1461 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %class.anon.0*, align 8
1462 // CHECK3-NEXT: store %class.anon.0* [[THIS]], %class.anon.0** [[THIS_ADDR]], align 8
1463 // CHECK3-NEXT: [[THIS1:%.*]] = load %class.anon.0*, %class.anon.0** [[THIS_ADDR]], align 8
1464 // CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON_0:%.*]], %class.anon.0* [[THIS1]], i32 0, i32 0
1465 // CHECK3-NEXT: [[TMP1:%.*]] = load %struct.S*, %struct.S** [[TMP0]], align 8
1466 // CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[TMP1]], i32 0, i32 0
1467 // CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[A]], align 4
1468 // CHECK3-NEXT: ret i32 [[TMP2]]
1469 //
1470 //
1471 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN1S3fooEv_l29
1472 // CHECK3-SAME: (%struct.S* noundef [[THIS:%.*]], %class.anon.0* noundef nonnull align 8 dereferenceable(8) [[L:%.*]]) #[[ATTR3]] {
1473 // CHECK3-NEXT: entry:
1474 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1475 // CHECK3-NEXT: [[L_ADDR:%.*]] = alloca %class.anon.0*, align 8
1476 // CHECK3-NEXT: [[TMP:%.*]] = alloca %class.anon.0*, align 8
1477 // CHECK3-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [2 x i8*], align 8
1478 // CHECK3-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1479 // CHECK3-NEXT: store %class.anon.0* [[L]], %class.anon.0** [[L_ADDR]], align 8
1480 // CHECK3-NEXT: [[TMP0:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1481 // CHECK3-NEXT: [[TMP1:%.*]] = load %class.anon.0*, %class.anon.0** [[L_ADDR]], align 8
1482 // CHECK3-NEXT: store %class.anon.0* [[TMP1]], %class.anon.0** [[TMP]], align 8
1483 // CHECK3-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 2, i1 false, i1 true)
1484 // CHECK3-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1
1485 // CHECK3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
1486 // CHECK3: user_code.entry:
1487 // CHECK3-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
1488 // CHECK3-NEXT: [[TMP4:%.*]] = load %class.anon.0*, %class.anon.0** [[TMP]], align 8
1489 // CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
1490 // CHECK3-NEXT: [[TMP6:%.*]] = bitcast %struct.S* [[TMP0]] to i8*
1491 // CHECK3-NEXT: store i8* [[TMP6]], i8** [[TMP5]], align 8
1492 // CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
1493 // CHECK3-NEXT: [[TMP8:%.*]] = bitcast %class.anon.0* [[TMP4]] to i8*
1494 // CHECK3-NEXT: store i8* [[TMP8]], i8** [[TMP7]], align 8
1495 // CHECK3-NEXT: [[TMP9:%.*]] = bitcast [2 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
1496 // CHECK3-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, %struct.S*, %class.anon.0*)* @__omp_outlined__1 to i8*), i8* null, i8** [[TMP9]], i64 2)
1497 // CHECK3-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 2, i1 true)
1498 // CHECK3-NEXT: ret void
1499 // CHECK3: worker.exit:
1500 // CHECK3-NEXT: ret void
1501 //
1502 //
1503 // CHECK3-LABEL: define {{[^@]+}}@__omp_outlined__1
1504 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S* noundef [[THIS:%.*]], %class.anon.0* noundef nonnull align 8 dereferenceable(8) [[L:%.*]]) #[[ATTR4]] {
1505 // CHECK3-NEXT: entry:
1506 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1507 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1508 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1509 // CHECK3-NEXT: [[L_ADDR:%.*]] = alloca %class.anon.0*, align 8
1510 // CHECK3-NEXT: [[TMP:%.*]] = alloca %class.anon.0*, align 8
1511 // CHECK3-NEXT: [[L1:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8
1512 // CHECK3-NEXT: [[_TMP2:%.*]] = alloca %class.anon.0*, align 8
1513 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1514 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1515 // CHECK3-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1516 // CHECK3-NEXT: store %class.anon.0* [[L]], %class.anon.0** [[L_ADDR]], align 8
1517 // CHECK3-NEXT: [[TMP0:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1518 // CHECK3-NEXT: [[TMP1:%.*]] = load %class.anon.0*, %class.anon.0** [[L_ADDR]], align 8
1519 // CHECK3-NEXT: store %class.anon.0* [[TMP1]], %class.anon.0** [[TMP]], align 8
1520 // CHECK3-NEXT: [[TMP2:%.*]] = load %class.anon.0*, %class.anon.0** [[TMP]], align 8
1521 // CHECK3-NEXT: [[TMP3:%.*]] = bitcast %class.anon.0* [[L1]] to i8*
1522 // CHECK3-NEXT: [[TMP4:%.*]] = bitcast %class.anon.0* [[TMP2]] to i8*
1523 // CHECK3-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP3]], i8* align 8 [[TMP4]], i64 8, i1 false)
1524 // CHECK3-NEXT: store %class.anon.0* [[L1]], %class.anon.0** [[_TMP2]], align 8
1525 // CHECK3-NEXT: [[TMP5:%.*]] = load %class.anon.0*, %class.anon.0** [[_TMP2]], align 8
1526 // CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[TMP5]], i32 0, i32 0
1527 // CHECK3-NEXT: store %struct.S* [[TMP0]], %struct.S** [[TMP6]], align 8
1528 // CHECK3-NEXT: [[TMP7:%.*]] = load %class.anon.0*, %class.anon.0** [[_TMP2]], align 8
1529 // CHECK3-NEXT: [[CALL:%.*]] = call noundef i32 @_ZZN1S3fooEvENKUlvE_clEv(%class.anon.0* noundef nonnull align 8 dereferenceable(8) [[TMP7]]) #[[ATTR7]]
1530 // CHECK3-NEXT: ret void
1531 //
1532 //
1533 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooIZN1S3fooEvEUlvE_EiRKT__l18
1534 // CHECK3-SAME: (%class.anon.0* noundef nonnull align 8 dereferenceable(8) [[T:%.*]]) #[[ATTR3]] {
1535 // CHECK3-NEXT: entry:
1536 // CHECK3-NEXT: [[T_ADDR:%.*]] = alloca %class.anon.0*, align 8
1537 // CHECK3-NEXT: [[TMP:%.*]] = alloca %class.anon.0*, align 8
1538 // CHECK3-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x i8*], align 8
1539 // CHECK3-NEXT: store %class.anon.0* [[T]], %class.anon.0** [[T_ADDR]], align 8
1540 // CHECK3-NEXT: [[TMP0:%.*]] = load %class.anon.0*, %class.anon.0** [[T_ADDR]], align 8
1541 // CHECK3-NEXT: store %class.anon.0* [[TMP0]], %class.anon.0** [[TMP]], align 8
1542 // CHECK3-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 2, i1 false, i1 true)
1543 // CHECK3-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
1544 // CHECK3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
1545 // CHECK3: user_code.entry:
1546 // CHECK3-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
1547 // CHECK3-NEXT: [[TMP3:%.*]] = load %class.anon.0*, %class.anon.0** [[TMP]], align 8
1548 // CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
1549 // CHECK3-NEXT: [[TMP5:%.*]] = bitcast %class.anon.0* [[TMP3]] to i8*
1550 // CHECK3-NEXT: store i8* [[TMP5]], i8** [[TMP4]], align 8
1551 // CHECK3-NEXT: [[TMP6:%.*]] = bitcast [1 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
1552 // CHECK3-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, %class.anon.0*)* @__omp_outlined__2 to i8*), i8* null, i8** [[TMP6]], i64 1)
1553 // CHECK3-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 2, i1 true)
1554 // CHECK3-NEXT: ret void
1555 // CHECK3: worker.exit:
1556 // CHECK3-NEXT: ret void
1557 //
1558 //
1559 // CHECK3-LABEL: define {{[^@]+}}@__omp_outlined__2
1560 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %class.anon.0* noundef nonnull align 8 dereferenceable(8) [[T:%.*]]) #[[ATTR4]] {
1561 // CHECK3-NEXT: entry:
1562 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1563 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1564 // CHECK3-NEXT: [[T_ADDR:%.*]] = alloca %class.anon.0*, align 8
1565 // CHECK3-NEXT: [[TMP:%.*]] = alloca %class.anon.0*, align 8
1566 // CHECK3-NEXT: [[T1:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8
1567 // CHECK3-NEXT: [[_TMP2:%.*]] = alloca %class.anon.0*, align 8
1568 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1569 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1570 // CHECK3-NEXT: store %class.anon.0* [[T]], %class.anon.0** [[T_ADDR]], align 8
1571 // CHECK3-NEXT: [[TMP0:%.*]] = load %class.anon.0*, %class.anon.0** [[T_ADDR]], align 8
1572 // CHECK3-NEXT: store %class.anon.0* [[TMP0]], %class.anon.0** [[TMP]], align 8
1573 // CHECK3-NEXT: [[TMP1:%.*]] = load %class.anon.0*, %class.anon.0** [[TMP]], align 8
1574 // CHECK3-NEXT: [[TMP2:%.*]] = bitcast %class.anon.0* [[T1]] to i8*
1575 // CHECK3-NEXT: [[TMP3:%.*]] = bitcast %class.anon.0* [[TMP1]] to i8*
1576 // CHECK3-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP2]], i8* align 8 [[TMP3]], i64 8, i1 false)
1577 // CHECK3-NEXT: store %class.anon.0* [[T1]], %class.anon.0** [[_TMP2]], align 8
1578 // CHECK3-NEXT: [[TMP4:%.*]] = load %class.anon.0*, %class.anon.0** [[_TMP2]], align 8
1579 // CHECK3-NEXT: [[TMP5:%.*]] = load %class.anon.0*, %class.anon.0** [[_TMP2]], align 8
1580 // CHECK3-NEXT: [[CALL:%.*]] = call noundef i32 @_ZZN1S3fooEvENKUlvE_clEv(%class.anon.0* noundef nonnull align 8 dereferenceable(8) [[TMP5]]) #[[ATTR7]]
1581 // CHECK3-NEXT: ret void
1582 //
1583