// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// REQUIRES: powerpc-registered-target
// REQUIRES: nvptx-registered-target

// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple nvptx64-nvidia-cuda -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix=CHECK2
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple nvptx64-nvidia-cuda -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -emit-pch -o %t
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple nvptx64-nvidia-cuda -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -o - | FileCheck %s --check-prefix=CHECK3
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple nvptx64-nvidia-cuda -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -o - | FileCheck %s --check-prefix=CHECK4

// expected-no-diagnostics
#ifndef HEADER
#define HEADER

template <typename T>
int foo(const T &t) {
#pragma omp target parallel
  t();
  return 0;
}

struct S {
  int a = 15;
  int foo() {
    auto &&L = [&]() { return a; };
#pragma omp target
    L();
#pragma omp target parallel
    L();
    return a + ::foo(L);
  }
} s;

int main(int argc, char **argv) {
  int &b = argc;
  int &&c = 1;
  int *d = &argc;
  int a;
  auto &&L = [&]() { return argc + b + c + reinterpret_cast<long int>(d) + a; };
#pragma omp target firstprivate(argc) map(to : a)
  L();
#pragma omp target parallel
  L();
  return argc + s.foo();
}

#endif // HEADER
// CHECK1-LABEL: define {{[^@]+}}@main
// CHECK1-SAME: (i32 signext [[ARGC:%.*]], i8** [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[ARGV_ADDR:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: [[B:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[C:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[REF_TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[D:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[L:%.*]] = alloca %class.anon*, align 8
// CHECK1-NEXT: [[REF_TMP1:%.*]] = alloca [[CLASS_ANON:%.*]], align 8
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[_TMP2:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[_TMP3:%.*]] = alloca %class.anon*, align 8
// CHECK1-NEXT: [[ARGC_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [11 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [11 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [11 x i8*], align 8
// CHECK1-NEXT: [[_TMP4:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[_TMP5:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[_TMP6:%.*]] = alloca %class.anon*, align 8
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS7:%.*]] = alloca [11 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS8:%.*]] = alloca [11 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS9:%.*]] = alloca [11 x i8*], align 8
// CHECK1-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK1-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
// CHECK1-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: store i32* [[ARGC_ADDR]], i32** [[B]], align 8
// CHECK1-NEXT: store i32 1, i32* [[REF_TMP]], align 4
// CHECK1-NEXT: store i32* [[REF_TMP]], i32** [[C]], align 8
// CHECK1-NEXT: store i32* [[ARGC_ADDR]], i32** [[D]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP1]], i32 0, i32 0
// CHECK1-NEXT: store i32* [[ARGC_ADDR]], i32** [[TMP0]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP1]], i32 0, i32 1
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[B]], align 8
// CHECK1-NEXT: store i32* [[TMP2]], i32** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP1]], i32 0, i32 2
// CHECK1-NEXT: [[TMP4:%.*]] = load i32*, i32** [[C]], align 8
// CHECK1-NEXT: store i32* [[TMP4]], i32** [[TMP3]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP1]], i32 0, i32 3
// CHECK1-NEXT: store i32** [[D]], i32*** [[TMP5]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP1]], i32 0, i32 4
// CHECK1-NEXT: store i32* [[A]], i32** [[TMP6]], align 8
// CHECK1-NEXT: store %class.anon* [[REF_TMP1]], %class.anon** [[L]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = load i32*, i32** [[B]], align 8
// CHECK1-NEXT: store i32* [[TMP7]], i32** [[TMP]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = load i32*, i32** [[C]], align 8
// CHECK1-NEXT: store i32* [[TMP8]], i32** [[_TMP2]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = load %class.anon*, %class.anon** [[L]], align 8
// CHECK1-NEXT: store %class.anon* [[TMP9]], %class.anon** [[_TMP3]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[ARGC_CASTED]] to i32*
// CHECK1-NEXT: store i32 [[TMP10]], i32* [[CONV]], align 4
// CHECK1-NEXT: [[TMP11:%.*]] = load i64, i64* [[ARGC_CASTED]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = load i32*, i32** [[TMP]], align 8
// CHECK1-NEXT: [[TMP13:%.*]] = load i32*, i32** [[_TMP2]], align 8
// CHECK1-NEXT: [[TMP14:%.*]] = load i32*, i32** [[D]], align 8
// CHECK1-NEXT: [[TMP15:%.*]] = load %class.anon*, %class.anon** [[_TMP3]], align 8
// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP15]], i32 0, i32 0
// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP15]], i32 0, i32 0
// CHECK1-NEXT: [[TMP18:%.*]] = load i32*, i32** [[TMP17]], align 8
// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP15]], i32 0, i32 1
// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP15]], i32 0, i32 1
// CHECK1-NEXT: [[TMP21:%.*]] = load i32*, i32** [[TMP20]], align 8
// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP15]], i32 0, i32 2
// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP15]], i32 0, i32 2
// CHECK1-NEXT: [[TMP24:%.*]] = load i32*, i32** [[TMP23]], align 8
// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP15]], i32 0, i32 3
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP15]], i32 0, i32 3
// CHECK1-NEXT: [[TMP27:%.*]] = load i32**, i32*** [[TMP26]], align 8
// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP15]], i32 0, i32 4
// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP15]], i32 0, i32 4
// CHECK1-NEXT: [[TMP30:%.*]] = load i32*, i32** [[TMP29]], align 8
// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i64*
// CHECK1-NEXT: store i64 [[TMP11]], i64* [[TMP32]], align 8
// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i64*
// CHECK1-NEXT: store i64 [[TMP11]], i64* [[TMP34]], align 8
// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK1-NEXT: store i8* null, i8** [[TMP35]], align 8
// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK1-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i32**
// CHECK1-NEXT: store i32* [[TMP12]], i32** [[TMP37]], align 8
// CHECK1-NEXT: [[TMP38:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK1-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32**
// CHECK1-NEXT: store i32* [[TMP12]], i32** [[TMP39]], align 8
// CHECK1-NEXT: [[TMP40:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK1-NEXT: store i8* null, i8** [[TMP40]], align 8
// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK1-NEXT: [[TMP42:%.*]] = bitcast i8** [[TMP41]] to i32**
// CHECK1-NEXT: store i32* [[TMP13]], i32** [[TMP42]], align 8
// CHECK1-NEXT: [[TMP43:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK1-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32**
// CHECK1-NEXT: store i32* [[TMP13]], i32** [[TMP44]], align 8
// CHECK1-NEXT: [[TMP45:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK1-NEXT: store i8* null, i8** [[TMP45]], align 8
// CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK1-NEXT: [[TMP47:%.*]] = bitcast i8** [[TMP46]] to i32**
// CHECK1-NEXT: store i32* [[TMP14]], i32** [[TMP47]], align 8
// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK1-NEXT: [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32**
// CHECK1-NEXT: store i32* [[TMP14]], i32** [[TMP49]], align 8
// CHECK1-NEXT: [[TMP50:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
// CHECK1-NEXT: store i8* null, i8** [[TMP50]], align 8
// CHECK1-NEXT: [[TMP51:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
// CHECK1-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32**
// CHECK1-NEXT: store i32* [[A]], i32** [[TMP52]], align 8
// CHECK1-NEXT: [[TMP53:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
// CHECK1-NEXT: [[TMP54:%.*]] = bitcast i8** [[TMP53]] to i32**
// CHECK1-NEXT: store i32* [[A]], i32** [[TMP54]], align 8
// CHECK1-NEXT: [[TMP55:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4
// CHECK1-NEXT: store i8* null, i8** [[TMP55]], align 8
// CHECK1-NEXT: [[TMP56:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5
// CHECK1-NEXT: [[TMP57:%.*]] = bitcast i8** [[TMP56]] to %class.anon**
// CHECK1-NEXT: store %class.anon* [[TMP15]], %class.anon** [[TMP57]], align 8
// CHECK1-NEXT: [[TMP58:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5
// CHECK1-NEXT: [[TMP59:%.*]] = bitcast i8** [[TMP58]] to %class.anon**
// CHECK1-NEXT: store %class.anon* [[TMP15]], %class.anon** [[TMP59]], align 8
// CHECK1-NEXT: [[TMP60:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 5
// CHECK1-NEXT: store i8* null, i8** [[TMP60]], align 8
// CHECK1-NEXT: [[TMP61:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 6
// CHECK1-NEXT: [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i32***
// CHECK1-NEXT: store i32** [[TMP16]], i32*** [[TMP62]], align 8
// CHECK1-NEXT: [[TMP63:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 6
// CHECK1-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i32**
// CHECK1-NEXT: store i32* [[TMP18]], i32** [[TMP64]], align 8
// CHECK1-NEXT: [[TMP65:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 6
// CHECK1-NEXT: store i8* null, i8** [[TMP65]], align 8
// CHECK1-NEXT: [[TMP66:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 7
// CHECK1-NEXT: [[TMP67:%.*]] = bitcast i8** [[TMP66]] to i32***
// CHECK1-NEXT: store i32** [[TMP19]], i32*** [[TMP67]], align 8
// CHECK1-NEXT: [[TMP68:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 7
// CHECK1-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i32**
// CHECK1-NEXT: store i32* [[TMP21]], i32** [[TMP69]], align 8
// CHECK1-NEXT: [[TMP70:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 7
// CHECK1-NEXT: store i8* null, i8** [[TMP70]], align 8
// CHECK1-NEXT: [[TMP71:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 8
// CHECK1-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i32***
// CHECK1-NEXT: store i32** [[TMP22]], i32*** [[TMP72]], align 8
// CHECK1-NEXT: [[TMP73:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 8
// CHECK1-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32**
// CHECK1-NEXT: store i32* [[TMP24]], i32** [[TMP74]], align 8
// CHECK1-NEXT: [[TMP75:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 8
// CHECK1-NEXT: store i8* null, i8** [[TMP75]], align 8
// CHECK1-NEXT: [[TMP76:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 9
// CHECK1-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i32****
// CHECK1-NEXT: store i32*** [[TMP25]], i32**** [[TMP77]], align 8
// CHECK1-NEXT: [[TMP78:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 9
// CHECK1-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32***
// CHECK1-NEXT: store i32** [[TMP27]], i32*** [[TMP79]], align 8
// CHECK1-NEXT: [[TMP80:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 9
// CHECK1-NEXT: store i8* null, i8** [[TMP80]], align 8
// CHECK1-NEXT: [[TMP81:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 10
// CHECK1-NEXT: [[TMP82:%.*]] = bitcast i8** [[TMP81]] to i32***
// CHECK1-NEXT: store i32** [[TMP28]], i32*** [[TMP82]], align 8
// CHECK1-NEXT: [[TMP83:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 10
// CHECK1-NEXT: [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32**
// CHECK1-NEXT: store i32* [[TMP30]], i32** [[TMP84]], align 8
// CHECK1-NEXT: [[TMP85:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 10
// CHECK1-NEXT: store i8* null, i8** [[TMP85]], align 8
// CHECK1-NEXT: [[TMP86:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP87:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP88:%.*]] = call i32 @__tgt_target_mapper(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l41.region_id, i32 11, i8** [[TMP86]], i8** [[TMP87]], i64* getelementptr inbounds ([11 x i64], [11 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([11 x i64], [11 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null)
// CHECK1-NEXT: [[TMP89:%.*]] = icmp ne i32 [[TMP88]], 0
// CHECK1-NEXT: br i1 [[TMP89]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK1: omp_offload.failed:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l41(i64 [[TMP11]], i32* [[TMP12]], i32* [[TMP13]], i32* [[TMP14]], i32* [[A]], %class.anon* [[TMP15]]) #[[ATTR4:[0-9]+]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK1: omp_offload.cont:
// CHECK1-NEXT: [[TMP90:%.*]] = load i32*, i32** [[B]], align 8
// CHECK1-NEXT: store i32* [[TMP90]], i32** [[_TMP4]], align 8
// CHECK1-NEXT: [[TMP91:%.*]] = load i32*, i32** [[C]], align 8
// CHECK1-NEXT: store i32* [[TMP91]], i32** [[_TMP5]], align 8
// CHECK1-NEXT: [[TMP92:%.*]] = load %class.anon*, %class.anon** [[L]], align 8
// CHECK1-NEXT: store %class.anon* [[TMP92]], %class.anon** [[_TMP6]], align 8
// CHECK1-NEXT: [[TMP93:%.*]] = load i32*, i32** [[_TMP4]], align 8
// CHECK1-NEXT: [[TMP94:%.*]] = load i32*, i32** [[_TMP5]], align 8
// CHECK1-NEXT: [[TMP95:%.*]] = load i32*, i32** [[D]], align 8
// CHECK1-NEXT: [[TMP96:%.*]] = load %class.anon*, %class.anon** [[_TMP6]], align 8
// CHECK1-NEXT: [[TMP97:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP96]], i32 0, i32 0
// CHECK1-NEXT: [[TMP98:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP96]], i32 0, i32 0
// CHECK1-NEXT: [[TMP99:%.*]] = load i32*, i32** [[TMP98]], align 8
// CHECK1-NEXT: [[TMP100:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP96]], i32 0, i32 1
// CHECK1-NEXT: [[TMP101:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP96]], i32 0, i32 1
// CHECK1-NEXT: [[TMP102:%.*]] = load i32*, i32** [[TMP101]], align 8
// CHECK1-NEXT: [[TMP103:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP96]], i32 0, i32 2
// CHECK1-NEXT: [[TMP104:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP96]], i32 0, i32 2
// CHECK1-NEXT: [[TMP105:%.*]] = load i32*, i32** [[TMP104]], align 8
// CHECK1-NEXT: [[TMP106:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP96]], i32 0, i32 3
// CHECK1-NEXT: [[TMP107:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP96]], i32 0, i32 3
// CHECK1-NEXT: [[TMP108:%.*]] = load i32**, i32*** [[TMP107]], align 8
// CHECK1-NEXT: [[TMP109:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP96]], i32 0, i32 4
// CHECK1-NEXT: [[TMP110:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP96]], i32 0, i32 4
// CHECK1-NEXT: [[TMP111:%.*]] = load i32*, i32** [[TMP110]], align 8
// CHECK1-NEXT: [[TMP112:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
// CHECK1-NEXT: [[TMP113:%.*]] = bitcast i8** [[TMP112]] to i32**
// CHECK1-NEXT: store i32* [[ARGC_ADDR]], i32** [[TMP113]], align 8
// CHECK1-NEXT: [[TMP114:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
// CHECK1-NEXT: [[TMP115:%.*]] = bitcast i8** [[TMP114]] to i32**
// CHECK1-NEXT: store i32* [[ARGC_ADDR]], i32** [[TMP115]], align 8
// CHECK1-NEXT: [[TMP116:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 0
// CHECK1-NEXT: store i8* null, i8** [[TMP116]], align 8
// CHECK1-NEXT: [[TMP117:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 1
// CHECK1-NEXT: [[TMP118:%.*]] = bitcast i8** [[TMP117]] to i32**
// CHECK1-NEXT: store i32* [[TMP93]], i32** [[TMP118]], align 8
// CHECK1-NEXT: [[TMP119:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 1
// CHECK1-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to i32**
// CHECK1-NEXT: store i32* [[TMP93]], i32** [[TMP120]], align 8
// CHECK1-NEXT: [[TMP121:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 1
// CHECK1-NEXT: store i8* null, i8** [[TMP121]], align 8
// CHECK1-NEXT: [[TMP122:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 2
// CHECK1-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to i32**
// CHECK1-NEXT: store i32* [[TMP94]], i32** [[TMP123]], align 8
// CHECK1-NEXT: [[TMP124:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 2
// CHECK1-NEXT: [[TMP125:%.*]] = bitcast i8** [[TMP124]] to i32**
// CHECK1-NEXT: store i32* [[TMP94]], i32** [[TMP125]], align 8
// CHECK1-NEXT: [[TMP126:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 2
// CHECK1-NEXT: store i8* null, i8** [[TMP126]], align 8
// CHECK1-NEXT: [[TMP127:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 3
// CHECK1-NEXT: [[TMP128:%.*]] = bitcast i8** [[TMP127]] to i32**
// CHECK1-NEXT: store i32* [[TMP95]], i32** [[TMP128]], align 8
// CHECK1-NEXT: [[TMP129:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 3
// CHECK1-NEXT: [[TMP130:%.*]] = bitcast i8** [[TMP129]] to i32**
// CHECK1-NEXT: store i32* [[TMP95]], i32** [[TMP130]], align 8
// CHECK1-NEXT: [[TMP131:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 3
// CHECK1-NEXT: store i8* null, i8** [[TMP131]], align 8
// CHECK1-NEXT: [[TMP132:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 4
// CHECK1-NEXT: [[TMP133:%.*]] = bitcast i8** [[TMP132]] to i32**
// CHECK1-NEXT: store i32* [[A]], i32** [[TMP133]], align 8
// CHECK1-NEXT: [[TMP134:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 4
// CHECK1-NEXT: [[TMP135:%.*]] = bitcast i8** [[TMP134]] to i32**
// CHECK1-NEXT: store i32* [[A]], i32** [[TMP135]], align 8
// CHECK1-NEXT: [[TMP136:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 4
// CHECK1-NEXT: store i8* null, i8** [[TMP136]], align 8
// CHECK1-NEXT: [[TMP137:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 5
// CHECK1-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to %class.anon**
// CHECK1-NEXT: store %class.anon* [[TMP96]], %class.anon** [[TMP138]], align 8
// CHECK1-NEXT: [[TMP139:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 5
// CHECK1-NEXT: [[TMP140:%.*]] = bitcast i8** [[TMP139]] to %class.anon**
// CHECK1-NEXT: store %class.anon* [[TMP96]], %class.anon** [[TMP140]], align 8
// CHECK1-NEXT: [[TMP141:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 5
// CHECK1-NEXT: store i8* null, i8** [[TMP141]], align 8
// CHECK1-NEXT: [[TMP142:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 6
// CHECK1-NEXT: [[TMP143:%.*]] = bitcast i8** [[TMP142]] to i32***
// CHECK1-NEXT: store i32** [[TMP97]], i32*** [[TMP143]], align 8
// CHECK1-NEXT: [[TMP144:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 6
// CHECK1-NEXT: [[TMP145:%.*]] = bitcast i8** [[TMP144]] to i32**
// CHECK1-NEXT: store i32* [[TMP99]], i32** [[TMP145]], align 8
// CHECK1-NEXT: [[TMP146:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 6
// CHECK1-NEXT: store i8* null, i8** [[TMP146]], align 8
// CHECK1-NEXT: [[TMP147:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 7
// CHECK1-NEXT: [[TMP148:%.*]] = bitcast i8** [[TMP147]] to i32***
// CHECK1-NEXT: store i32** [[TMP100]], i32*** [[TMP148]], align 8
// CHECK1-NEXT: [[TMP149:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 7
// CHECK1-NEXT: [[TMP150:%.*]] = bitcast i8** [[TMP149]] to i32**
// CHECK1-NEXT: store i32* [[TMP102]], i32** [[TMP150]], align 8
// CHECK1-NEXT: [[TMP151:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 7
// CHECK1-NEXT: store i8* null, i8** [[TMP151]], align 8
// CHECK1-NEXT: [[TMP152:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 8
// CHECK1-NEXT: [[TMP153:%.*]] = bitcast i8** [[TMP152]] to i32***
// CHECK1-NEXT: store i32** [[TMP103]], i32*** [[TMP153]], align 8
// CHECK1-NEXT: [[TMP154:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 8
// CHECK1-NEXT: [[TMP155:%.*]] = bitcast i8** [[TMP154]] to i32**
// CHECK1-NEXT: store i32* [[TMP105]], i32** [[TMP155]], align 8
// CHECK1-NEXT: [[TMP156:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 8
// CHECK1-NEXT: store i8* null, i8** [[TMP156]], align 8
// CHECK1-NEXT: [[TMP157:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 9
// CHECK1-NEXT: [[TMP158:%.*]] = bitcast i8** [[TMP157]] to i32****
// CHECK1-NEXT: store i32*** [[TMP106]], i32**** [[TMP158]], align 8
// CHECK1-NEXT: [[TMP159:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 9
// CHECK1-NEXT: [[TMP160:%.*]] = bitcast i8** [[TMP159]] to i32***
// CHECK1-NEXT: store i32** [[TMP108]], i32*** [[TMP160]], align 8
// CHECK1-NEXT: [[TMP161:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 9
// CHECK1-NEXT: store i8* null, i8** [[TMP161]], align 8
// CHECK1-NEXT: [[TMP162:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 10
// CHECK1-NEXT: [[TMP163:%.*]] = bitcast i8** [[TMP162]] to i32***
// CHECK1-NEXT: store i32** [[TMP109]], i32*** [[TMP163]], align 8
// CHECK1-NEXT: [[TMP164:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 10
// CHECK1-NEXT: [[TMP165:%.*]] = bitcast i8** [[TMP164]] to i32**
// CHECK1-NEXT: store i32* [[TMP111]], i32** [[TMP165]], align 8
// CHECK1-NEXT: [[TMP166:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 10
// CHECK1-NEXT: store i8* null, i8** [[TMP166]], align 8
// CHECK1-NEXT: [[TMP167:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
// CHECK1-NEXT: [[TMP168:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
// CHECK1-NEXT: [[TMP169:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l43.region_id, i32 11, i8** [[TMP167]], i8** [[TMP168]], i64* getelementptr inbounds ([11 x i64], [11 x i64]* @.offload_sizes.1, i32 0, i32 0), i64* getelementptr inbounds ([11 x i64], [11 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK1-NEXT: [[TMP170:%.*]] = icmp ne i32 [[TMP169]], 0
// CHECK1-NEXT: br i1 [[TMP170]], label [[OMP_OFFLOAD_FAILED10:%.*]], label [[OMP_OFFLOAD_CONT11:%.*]]
// CHECK1: omp_offload.failed10:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l43(i32* [[ARGC_ADDR]], i32* [[TMP93]], i32* [[TMP94]], i32* [[TMP95]], i32* [[A]], %class.anon* [[TMP96]]) #[[ATTR4]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT11]]
// CHECK1: omp_offload.cont11:
// CHECK1-NEXT: [[TMP171:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
// CHECK1-NEXT: [[CALL:%.*]] = call signext i32 @_ZN1S3fooEv(%struct.S* nonnull align 4 dereferenceable(4) @s)
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP171]], [[CALL]]
// CHECK1-NEXT: ret i32 [[ADD]]
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l41
// CHECK1-SAME: (i64 [[ARGC:%.*]], i32* nonnull align 4 dereferenceable(4) [[B:%.*]], i32* nonnull align 4 dereferenceable(4) [[C:%.*]], i32* [[D:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], %class.anon* nonnull align 8 dereferenceable(40) [[L:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[ARGC_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[D_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[L_ADDR:%.*]] = alloca %class.anon*, align 8
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[_TMP2:%.*]] = alloca %class.anon*, align 8
// CHECK1-NEXT: [[L3:%.*]] = alloca [[CLASS_ANON:%.*]], align 8
// CHECK1-NEXT: [[_TMP4:%.*]] = alloca %class.anon*, align 8
// CHECK1-NEXT: [[B5:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[_TMP6:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[C7:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[_TMP8:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: store i64 [[ARGC]], i64* [[ARGC_ADDR]], align 8
// CHECK1-NEXT: store i32* [[B]], i32** [[B_ADDR]], align 8
// CHECK1-NEXT: store i32* [[C]], i32** [[C_ADDR]], align 8
// CHECK1-NEXT: store i32* [[D]], i32** [[D_ADDR]], align 8
// CHECK1-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK1-NEXT: store %class.anon* [[L]], %class.anon** [[L_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[ARGC_ADDR]] to i32*
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[B_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[C_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load %class.anon*, %class.anon** [[L_ADDR]], align 8
// CHECK1-NEXT: store i32* [[TMP0]], i32** [[TMP]], align 8
// CHECK1-NEXT: store i32* [[TMP1]], i32** [[_TMP1]], align 8
// CHECK1-NEXT: store %class.anon* [[TMP3]], %class.anon** [[_TMP2]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load %class.anon*, %class.anon** [[_TMP2]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = bitcast %class.anon* [[L3]] to i8*
// CHECK1-NEXT: [[TMP6:%.*]] = bitcast %class.anon* [[TMP4]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP5]], i8* align 8 [[TMP6]], i64 40, i1 false)
// CHECK1-NEXT: store %class.anon* [[L3]], %class.anon** [[_TMP4]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = load i32*, i32** [[TMP]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// CHECK1-NEXT: store i32 [[TMP8]], i32* [[B5]], align 4
// CHECK1-NEXT: store i32* [[B5]], i32** [[_TMP6]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = load i32*, i32** [[_TMP1]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
// CHECK1-NEXT: store i32 [[TMP10]], i32* [[C7]], align 4
// CHECK1-NEXT: store i32* [[C7]], i32** [[_TMP8]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = load %class.anon*, %class.anon** [[_TMP4]], align 8
// CHECK1-NEXT: [[CALL:%.*]] = call i64 @"_ZZ4mainENK3$_0clEv"(%class.anon* nonnull align 8 dereferenceable(40) [[TMP11]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l43
// CHECK1-SAME: (i32* nonnull align 4 dereferenceable(4) [[ARGC:%.*]], i32* nonnull align 4 dereferenceable(4) [[B:%.*]], i32* nonnull align 4 dereferenceable(4) [[C:%.*]], i32* [[D:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], %class.anon* nonnull align 8 dereferenceable(40) [[L:%.*]]) #[[ATTR5:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[ARGC_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[D_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[L_ADDR:%.*]] = alloca %class.anon*, align 8
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[_TMP2:%.*]] = alloca %class.anon*, align 8
// CHECK1-NEXT: store i32* [[ARGC]], i32** [[ARGC_ADDR]], align 8
// CHECK1-NEXT: store i32* [[B]], i32** [[B_ADDR]], align 8
// CHECK1-NEXT: store i32* [[C]], i32** [[C_ADDR]], align 8
// CHECK1-NEXT: store i32* [[D]], i32** [[D_ADDR]], align 8
// CHECK1-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK1-NEXT: store %class.anon* [[L]], %class.anon** [[L_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[ARGC_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[B_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[C_ADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load %class.anon*, %class.anon** [[L_ADDR]], align 8
// CHECK1-NEXT: store i32* [[TMP1]], i32** [[TMP]], align 8
// CHECK1-NEXT: store i32* [[TMP2]], i32** [[_TMP1]], align 8
// CHECK1-NEXT: store %class.anon* [[TMP4]], %class.anon** [[_TMP2]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = load i32*, i32** [[TMP]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = load i32*, i32** [[_TMP1]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = load i32*, i32** [[D_ADDR]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = load %class.anon*, %class.anon** [[_TMP2]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, i32*, i32*, i32*, %class.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[TMP0]], i32* [[TMP5]], i32* [[TMP6]], i32* [[TMP7]], i32* [[TMP3]], %class.anon* [[TMP8]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[ARGC:%.*]], i32* nonnull align 4 dereferenceable(4) [[B:%.*]], i32* nonnull align 4 dereferenceable(4) [[C:%.*]], i32* [[D:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], %class.anon* nonnull align 8 dereferenceable(40) [[L:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[ARGC_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[D_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[L_ADDR:%.*]] = alloca %class.anon*, align 8
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[_TMP2:%.*]] = alloca %class.anon*, align 8
// CHECK1-NEXT: [[L3:%.*]] = alloca [[CLASS_ANON:%.*]], align 8
// CHECK1-NEXT: [[_TMP4:%.*]] = alloca %class.anon*, align 8
// CHECK1-NEXT: [[ARGC5:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[B6:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[_TMP7:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[C8:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[_TMP9:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[A10:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[ARGC]], i32** [[ARGC_ADDR]], align 8
// CHECK1-NEXT: store i32* [[B]], i32** [[B_ADDR]], align 8
// CHECK1-NEXT: store i32* [[C]], i32** [[C_ADDR]], align 8
// CHECK1-NEXT: store i32* [[D]], i32** [[D_ADDR]], align 8
// CHECK1-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK1-NEXT: store %class.anon* [[L]], %class.anon** [[L_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[ARGC_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[B_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[C_ADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load %class.anon*, %class.anon** [[L_ADDR]], align 8
// CHECK1-NEXT: store i32* [[TMP1]], i32** [[TMP]], align 8
// CHECK1-NEXT: store i32* [[TMP2]], i32** [[_TMP1]], align 8
// CHECK1-NEXT: store %class.anon* [[TMP4]], %class.anon** [[_TMP2]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = load %class.anon*, %class.anon** [[_TMP2]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = bitcast %class.anon* [[L3]] to i8*
// CHECK1-NEXT: [[TMP7:%.*]] = bitcast %class.anon* [[TMP5]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP6]], i8* align 8 [[TMP7]], i64 40, i1 false)
// CHECK1-NEXT: store %class.anon* [[L3]], %class.anon** [[_TMP4]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: store i32 [[TMP8]], i32* [[ARGC5]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = load i32*, i32** [[TMP]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
// CHECK1-NEXT: store i32 [[TMP10]], i32* [[B6]], align 4
// CHECK1-NEXT: store i32* [[B6]], i32** [[_TMP7]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = load i32*, i32** [[_TMP1]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
// CHECK1-NEXT: store i32 [[TMP12]], i32* [[C8]], align 4
// CHECK1-NEXT: store i32* [[C8]], i32** [[_TMP9]], align 8
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK1-NEXT: store i32 [[TMP13]], i32* [[A10]], align 4
// CHECK1-NEXT: [[TMP14:%.*]] = load %class.anon*, %class.anon** [[_TMP4]], align 8
// CHECK1-NEXT: [[CALL:%.*]] = call i64 @"_ZZ4mainENK3$_0clEv"(%class.anon* nonnull align 8 dereferenceable(40) [[TMP14]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1S3fooEv
// CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) #[[ATTR3:[0-9]+]] comdat align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT: [[L:%.*]] = alloca %class.anon.0*, align 8
// CHECK1-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8
// CHECK1-NEXT: [[TMP:%.*]] = alloca %class.anon.0*, align 8
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
// CHECK1-NEXT: [[_TMP2:%.*]] = alloca %class.anon.0*, align 8
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [3 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS4:%.*]] = alloca [3 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [3 x i8*], align 8
// CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0
// CHECK1-NEXT: store %struct.S* [[THIS1]], %struct.S** [[TMP0]], align 8
// CHECK1-NEXT: store %class.anon.0* [[REF_TMP]], %class.anon.0** [[L]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load %class.anon.0*, %class.anon.0** [[L]], align 8
// CHECK1-NEXT: store %class.anon.0* [[TMP1]], %class.anon.0** [[TMP]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load %class.anon.0*, %class.anon.0** [[TMP]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[TMP2]], i32 0, i32 0
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[TMP2]], i32 0, i32 0
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to %struct.S**
// CHECK1-NEXT: store %struct.S* [[THIS1]], %struct.S** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to %struct.S**
// CHECK1-NEXT: store %struct.S* [[THIS1]], %struct.S** [[TMP8]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK1-NEXT: store i8* null, i8** [[TMP9]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK1-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to %class.anon.0**
// CHECK1-NEXT: store %class.anon.0* [[TMP2]], %class.anon.0** [[TMP11]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK1-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to %class.anon.0**
// CHECK1-NEXT: store %class.anon.0* [[TMP2]], %class.anon.0** [[TMP13]], align 8
// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK1-NEXT: store i8* null, i8** [[TMP14]], align 8
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK1-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to %struct.S***
// CHECK1-NEXT: store %struct.S** [[TMP3]], %struct.S*** [[TMP16]], align 8
// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK1-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to %struct.S***
// CHECK1-NEXT: store %struct.S** [[TMP4]], %struct.S*** [[TMP18]], align 8
// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK1-NEXT: store i8* null, i8** [[TMP19]], align 8
// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN1S3fooEv_l27.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.3, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null)
// CHECK1-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
// CHECK1-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK1: omp_offload.failed:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN1S3fooEv_l27(%struct.S* [[THIS1]], %class.anon.0* [[TMP2]]) #[[ATTR4]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK1: omp_offload.cont:
// CHECK1-NEXT: [[TMP24:%.*]] = load %class.anon.0*, %class.anon.0** [[L]], align 8
// CHECK1-NEXT: store %class.anon.0* [[TMP24]], %class.anon.0** [[_TMP2]], align 8
// CHECK1-NEXT: [[TMP25:%.*]] = load %class.anon.0*, %class.anon.0** [[_TMP2]], align 8
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[TMP25]], i32 0, i32 0
// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[TMP25]], i32 0, i32 0
// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
// CHECK1-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to %struct.S**
// CHECK1-NEXT: store %struct.S* [[THIS1]], %struct.S** [[TMP29]], align 8
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
// CHECK1-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to %struct.S**
// CHECK1-NEXT: store %struct.S* [[THIS1]], %struct.S** [[TMP31]], align 8
// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS5]], i64 0, i64 0
// CHECK1-NEXT: store i8* null, i8** [[TMP32]], align 8
// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 1
// CHECK1-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to %class.anon.0**
// CHECK1-NEXT: store %class.anon.0* [[TMP25]], %class.anon.0** [[TMP34]], align 8
// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 1
// CHECK1-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to %class.anon.0**
// CHECK1-NEXT: store %class.anon.0* [[TMP25]], %class.anon.0** [[TMP36]], align 8
// CHECK1-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS5]], i64 0, i64 1
// CHECK1-NEXT: store i8* null, i8** [[TMP37]], align 8
// CHECK1-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 2
// CHECK1-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to %struct.S***
// CHECK1-NEXT: store %struct.S** [[TMP26]], %struct.S*** [[TMP39]], align 8
// CHECK1-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 2
// CHECK1-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to %struct.S***
// CHECK1-NEXT: store %struct.S** [[TMP27]], %struct.S*** [[TMP41]], align 8
// CHECK1-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS5]], i64 0, i64 2
// CHECK1-NEXT: store i8* null, i8** [[TMP42]], align 8
// CHECK1-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
// CHECK1-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
// CHECK1-NEXT: [[TMP45:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN1S3fooEv_l29.region_id, i32 3, i8** [[TMP43]], i8** [[TMP44]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.6, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK1-NEXT: [[TMP46:%.*]] = icmp ne i32 [[TMP45]], 0
// CHECK1-NEXT: br i1 [[TMP46]], label [[OMP_OFFLOAD_FAILED6:%.*]], label [[OMP_OFFLOAD_CONT7:%.*]]
// CHECK1: omp_offload.failed6:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN1S3fooEv_l29(%struct.S* [[THIS1]], %class.anon.0* [[TMP25]]) #[[ATTR4]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT7]]
// CHECK1: omp_offload.cont7:
// CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
// CHECK1-NEXT: [[TMP47:%.*]] = load i32, i32* [[A]], align 4
// CHECK1-NEXT: [[TMP48:%.*]] = load %class.anon.0*, %class.anon.0** [[L]], align 8
// CHECK1-NEXT: [[CALL:%.*]] = call signext i32 @_Z3fooIZN1S3fooEvEUlvE_EiRKT_(%class.anon.0* nonnull align 8 dereferenceable(8) [[TMP48]])
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP47]], [[CALL]]
// CHECK1-NEXT: ret i32 [[ADD]]
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN1S3fooEv_l27
// CHECK1-SAME: (%struct.S* [[THIS:%.*]], %class.anon.0* nonnull align 8 dereferenceable(8) [[L:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT: [[L_ADDR:%.*]] = alloca %class.anon.0*, align 8
// CHECK1-NEXT: [[TMP:%.*]] = alloca %class.anon.0*, align 8
// CHECK1-NEXT: [[L1:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8
// CHECK1-NEXT: [[_TMP2:%.*]] = alloca %class.anon.0*, align 8
// CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store %class.anon.0* [[L]], %class.anon.0** [[L_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load %class.anon.0*, %class.anon.0** [[L_ADDR]], align 8
// CHECK1-NEXT: store %class.anon.0* [[TMP1]], %class.anon.0** [[TMP]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load %class.anon.0*, %class.anon.0** [[TMP]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast %class.anon.0* [[L1]] to i8*
// CHECK1-NEXT: [[TMP4:%.*]] = bitcast %class.anon.0* [[TMP2]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP3]], i8* align 8 [[TMP4]], i64 8, i1 false)
// CHECK1-NEXT: store %class.anon.0* [[L1]], %class.anon.0** [[_TMP2]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = load %class.anon.0*, %class.anon.0** [[_TMP2]], align 8
// CHECK1-NEXT: [[CALL:%.*]] = call signext i32 @_ZZN1S3fooEvENKUlvE_clEv(%class.anon.0* nonnull align 8 dereferenceable(8) [[TMP5]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZZN1S3fooEvENKUlvE_clEv
// CHECK1-SAME: (%class.anon.0* nonnull align 8 dereferenceable(8) [[THIS:%.*]]) #[[ATTR3]] comdat align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %class.anon.0*, align 8
// CHECK1-NEXT: store %class.anon.0* [[THIS]], %class.anon.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %class.anon.0*, %class.anon.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON_0:%.*]], %class.anon.0* [[THIS1]], i32 0, i32 0
// CHECK1-NEXT: [[TMP1:%.*]] = load %struct.S*, %struct.S** [[TMP0]], align 8
// CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[TMP1]], i32 0, i32 0
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[A]], align 4
// CHECK1-NEXT: ret i32 [[TMP2]]
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN1S3fooEv_l29
// CHECK1-SAME: (%struct.S* [[THIS:%.*]], %class.anon.0* nonnull align 8 dereferenceable(8) [[L:%.*]]) #[[ATTR5]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT: [[L_ADDR:%.*]] = alloca %class.anon.0*, align 8
// CHECK1-NEXT: [[TMP:%.*]] = alloca %class.anon.0*, align 8
// CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store %class.anon.0* [[L]], %class.anon.0** [[L_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load %class.anon.0*, %class.anon.0** [[L_ADDR]], align 8
// CHECK1-NEXT: store %class.anon.0* [[TMP1]], %class.anon.0** [[TMP]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load %class.anon.0*, %class.anon.0** [[TMP]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S*, %class.anon.0*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.S* [[TMP0]], %class.anon.0* [[TMP2]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..5
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S* [[THIS:%.*]], %class.anon.0* nonnull align 8 dereferenceable(8) [[L:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT: [[L_ADDR:%.*]] = alloca %class.anon.0*, align 8
// CHECK1-NEXT: [[TMP:%.*]] = alloca %class.anon.0*, align 8
// CHECK1-NEXT: [[L1:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8
// CHECK1-NEXT: [[_TMP2:%.*]] = alloca %class.anon.0*, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store %class.anon.0* [[L]], %class.anon.0** [[L_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load %class.anon.0*, %class.anon.0** [[L_ADDR]], align 8
// CHECK1-NEXT: store %class.anon.0* [[TMP1]], %class.anon.0** [[TMP]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load %class.anon.0*, %class.anon.0** [[TMP]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast %class.anon.0* [[L1]] to i8*
// CHECK1-NEXT: [[TMP4:%.*]] = bitcast %class.anon.0* [[TMP2]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP3]], i8* align 8 [[TMP4]], i64 8, i1 false)
// CHECK1-NEXT: store %class.anon.0* [[L1]], %class.anon.0** [[_TMP2]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = load %class.anon.0*, %class.anon.0** [[_TMP2]], align 8
// CHECK1-NEXT: [[CALL:%.*]] = call signext i32 @_ZZN1S3fooEvENKUlvE_clEv(%class.anon.0* nonnull align 8 dereferenceable(8) [[TMP5]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_Z3fooIZN1S3fooEvEUlvE_EiRKT_
// CHECK1-SAME: (%class.anon.0* nonnull align 8 dereferenceable(8) [[T:%.*]]) #[[ATTR3]] comdat {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[T_ADDR:%.*]] = alloca %class.anon.0*, align 8
// CHECK1-NEXT: [[TMP:%.*]] = alloca %class.anon.0*, align 8
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x i8*], align 8
// CHECK1-NEXT: store %class.anon.0* [[T]], %class.anon.0** [[T_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %class.anon.0*, %class.anon.0** [[T_ADDR]], align 8
// CHECK1-NEXT: store %class.anon.0* [[TMP0]], %class.anon.0** [[TMP]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load %class.anon.0*, %class.anon.0** [[TMP]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[CLASS_ANON_0:%.*]], %class.anon.0* [[TMP1]], i32 0, i32 0
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[TMP1]], i32 0, i32 0
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP5:%.*]] = bitcast i8** [[TMP4]] to %class.anon.0**
// CHECK1-NEXT: store %class.anon.0* [[TMP1]], %class.anon.0** [[TMP5]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP7:%.*]] = bitcast i8** [[TMP6]] to %class.anon.0**
// CHECK1-NEXT: store %class.anon.0* [[TMP1]], %class.anon.0** [[TMP7]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK1-NEXT: store i8* null, i8** [[TMP8]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK1-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to %struct.S***
// CHECK1-NEXT: store %struct.S** [[TMP2]], %struct.S*** [[TMP10]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK1-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to %struct.S***
// CHECK1-NEXT: store %struct.S** [[TMP3]], %struct.S*** [[TMP12]], align 8
// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK1-NEXT: store i8* null, i8** [[TMP13]], align 8
// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooIZN1S3fooEvEUlvE_EiRKT__l18.region_id, i32 2, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.9, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK1-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
// CHECK1-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK1: omp_offload.failed:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooIZN1S3fooEvEUlvE_EiRKT__l18(%class.anon.0* [[TMP1]]) #[[ATTR4]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK1: omp_offload.cont:
// CHECK1-NEXT: ret i32 0
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooIZN1S3fooEvEUlvE_EiRKT__l18
// CHECK1-SAME: (%class.anon.0* nonnull align 8 dereferenceable(8) [[T:%.*]]) #[[ATTR5]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[T_ADDR:%.*]] = alloca %class.anon.0*, align 8
// CHECK1-NEXT: [[TMP:%.*]] = alloca %class.anon.0*, align 8
// CHECK1-NEXT: store %class.anon.0* [[T]], %class.anon.0** [[T_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %class.anon.0*, %class.anon.0** [[T_ADDR]], align 8
// CHECK1-NEXT: store %class.anon.0* [[TMP0]], %class.anon.0** [[TMP]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load %class.anon.0*, %class.anon.0** [[TMP]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %class.anon.0*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), %class.anon.0* [[TMP1]])
@__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %class.anon.0*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), %class.anon.0* [[TMP1]]) 727 // CHECK1-NEXT: ret void 728 // 729 // 730 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..8 731 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %class.anon.0* nonnull align 8 dereferenceable(8) [[T:%.*]]) #[[ATTR1]] { 732 // CHECK1-NEXT: entry: 733 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 734 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 735 // CHECK1-NEXT: [[T_ADDR:%.*]] = alloca %class.anon.0*, align 8 736 // CHECK1-NEXT: [[TMP:%.*]] = alloca %class.anon.0*, align 8 737 // CHECK1-NEXT: [[T1:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8 738 // CHECK1-NEXT: [[_TMP2:%.*]] = alloca %class.anon.0*, align 8 739 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 740 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 741 // CHECK1-NEXT: store %class.anon.0* [[T]], %class.anon.0** [[T_ADDR]], align 8 742 // CHECK1-NEXT: [[TMP0:%.*]] = load %class.anon.0*, %class.anon.0** [[T_ADDR]], align 8 743 // CHECK1-NEXT: store %class.anon.0* [[TMP0]], %class.anon.0** [[TMP]], align 8 744 // CHECK1-NEXT: [[TMP1:%.*]] = load %class.anon.0*, %class.anon.0** [[TMP]], align 8 745 // CHECK1-NEXT: [[TMP2:%.*]] = bitcast %class.anon.0* [[T1]] to i8* 746 // CHECK1-NEXT: [[TMP3:%.*]] = bitcast %class.anon.0* [[TMP1]] to i8* 747 // CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP2]], i8* align 8 [[TMP3]], i64 8, i1 false) 748 // CHECK1-NEXT: store %class.anon.0* [[T1]], %class.anon.0** [[_TMP2]], align 8 749 // CHECK1-NEXT: [[TMP4:%.*]] = load %class.anon.0*, %class.anon.0** [[_TMP2]], align 8 750 // CHECK1-NEXT: [[CALL:%.*]] = call signext i32 @_ZZN1S3fooEvENKUlvE_clEv(%class.anon.0* nonnull align 8 dereferenceable(8) [[TMP4]]) 751 // CHECK1-NEXT: ret void 752 // 753 // 754 // CHECK1-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg 755 // CHECK1-SAME: () #[[ATTR6:[0-9]+]] { 756 // CHECK1-NEXT: entry: 757 // CHECK1-NEXT: call void @__tgt_register_requires(i64 1) 758 // CHECK1-NEXT: ret void 759 // 760 // 761 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN1S3fooEv_l27 762 // CHECK2-SAME: (%struct.S* [[THIS:%.*]], %class.anon* nonnull align 8 dereferenceable(8) [[L:%.*]]) #[[ATTR0:[0-9]+]] { 763 // CHECK2-NEXT: entry: 764 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 765 // CHECK2-NEXT: [[L_ADDR:%.*]] = alloca %class.anon*, align 8 766 // CHECK2-NEXT: [[TMP:%.*]] = alloca %class.anon*, align 8 767 // CHECK2-NEXT: [[L1:%.*]] = alloca [[CLASS_ANON:%.*]], align 8 768 // CHECK2-NEXT: [[_TMP2:%.*]] = alloca %class.anon*, align 8 769 // CHECK2-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 770 // CHECK2-NEXT: store %class.anon* [[L]], %class.anon** [[L_ADDR]], align 8 771 // CHECK2-NEXT: [[TMP0:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 772 // CHECK2-NEXT: [[TMP1:%.*]] = load %class.anon*, %class.anon** [[L_ADDR]], align 8 773 // CHECK2-NEXT: store %class.anon* [[TMP1]], %class.anon** [[TMP]], align 8 774 // CHECK2-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1:[0-9]+]], i8 1, i1 true, i1 true) 775 // CHECK2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1 776 // CHECK2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], 
label [[WORKER_EXIT:%.*]] 777 // CHECK2: user_code.entry: 778 // CHECK2-NEXT: [[TMP3:%.*]] = load %class.anon*, %class.anon** [[TMP]], align 8 779 // CHECK2-NEXT: [[TMP4:%.*]] = bitcast %class.anon* [[L1]] to i8* 780 // CHECK2-NEXT: [[TMP5:%.*]] = bitcast %class.anon* [[TMP3]] to i8* 781 // CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP4]], i8* align 8 [[TMP5]], i64 8, i1 false) 782 // CHECK2-NEXT: store %class.anon* [[L1]], %class.anon** [[_TMP2]], align 8 783 // CHECK2-NEXT: [[TMP6:%.*]] = load %class.anon*, %class.anon** [[_TMP2]], align 8 784 // CHECK2-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP6]], i32 0, i32 0 785 // CHECK2-NEXT: store %struct.S* [[TMP0]], %struct.S** [[TMP7]], align 8 786 // CHECK2-NEXT: [[TMP8:%.*]] = load %class.anon*, %class.anon** [[_TMP2]], align 8 787 // CHECK2-NEXT: [[CALL:%.*]] = call i32 @_ZZN1S3fooEvENKUlvE_clEv(%class.anon* nonnull align 8 dereferenceable(8) [[TMP8]]) #[[ATTR6:[0-9]+]] 788 // CHECK2-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true) 789 // CHECK2-NEXT: ret void 790 // CHECK2: worker.exit: 791 // CHECK2-NEXT: ret void 792 // 793 // 794 // CHECK2-LABEL: define {{[^@]+}}@_ZZN1S3fooEvENKUlvE_clEv 795 // CHECK2-SAME: (%class.anon* nonnull align 8 dereferenceable(8) [[THIS:%.*]]) #[[ATTR2:[0-9]+]] comdat align 2 { 796 // CHECK2-NEXT: entry: 797 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %class.anon*, align 8 798 // CHECK2-NEXT: store %class.anon* [[THIS]], %class.anon** [[THIS_ADDR]], align 8 799 // CHECK2-NEXT: [[THIS1:%.*]] = load %class.anon*, %class.anon** [[THIS_ADDR]], align 8 800 // CHECK2-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON:%.*]], %class.anon* [[THIS1]], i32 0, i32 0 801 // CHECK2-NEXT: [[TMP1:%.*]] = load %struct.S*, %struct.S** [[TMP0]], align 8 802 // CHECK2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[TMP1]], i32 0, i32 0 803 // CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[A]], align 4 804 // CHECK2-NEXT: ret i32 [[TMP2]] 805 // 806 // 807 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN1S3fooEv_l29 808 // CHECK2-SAME: (%struct.S* [[THIS:%.*]], %class.anon* nonnull align 8 dereferenceable(8) [[L:%.*]]) #[[ATTR3:[0-9]+]] { 809 // CHECK2-NEXT: entry: 810 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 811 // CHECK2-NEXT: [[L_ADDR:%.*]] = alloca %class.anon*, align 8 812 // CHECK2-NEXT: [[TMP:%.*]] = alloca %class.anon*, align 8 813 // CHECK2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [2 x i8*], align 8 814 // CHECK2-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 815 // CHECK2-NEXT: store %class.anon* [[L]], %class.anon** [[L_ADDR]], align 8 816 // CHECK2-NEXT: [[TMP0:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 817 // CHECK2-NEXT: [[TMP1:%.*]] = load %class.anon*, %class.anon** [[L_ADDR]], align 8 818 // CHECK2-NEXT: store %class.anon* [[TMP1]], %class.anon** [[TMP]], align 8 819 // CHECK2-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 2, i1 false, i1 true) 820 // CHECK2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1 821 // CHECK2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]] 822 // CHECK2: user_code.entry: 823 // CHECK2-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) 824 // CHECK2-NEXT: [[TMP4:%.*]] = load %class.anon*, %class.anon** [[TMP]], align 8 825 // CHECK2-NEXT: [[TMP5:%.*]] = 
getelementptr inbounds [2 x i8*], [2 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0 826 // CHECK2-NEXT: [[TMP6:%.*]] = bitcast %struct.S* [[TMP0]] to i8* 827 // CHECK2-NEXT: store i8* [[TMP6]], i8** [[TMP5]], align 8 828 // CHECK2-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1 829 // CHECK2-NEXT: [[TMP8:%.*]] = bitcast %class.anon* [[TMP4]] to i8* 830 // CHECK2-NEXT: store i8* [[TMP8]], i8** [[TMP7]], align 8 831 // CHECK2-NEXT: [[TMP9:%.*]] = bitcast [2 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8** 832 // CHECK2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, %struct.S*, %class.anon*)* @__omp_outlined__ to i8*), i8* null, i8** [[TMP9]], i64 2) 833 // CHECK2-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 2, i1 true) 834 // CHECK2-NEXT: ret void 835 // CHECK2: worker.exit: 836 // CHECK2-NEXT: ret void 837 // 838 // 839 // CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__ 840 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S* [[THIS:%.*]], %class.anon* nonnull align 8 dereferenceable(8) [[L:%.*]]) #[[ATTR0]] { 841 // CHECK2-NEXT: entry: 842 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 843 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 844 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 845 // CHECK2-NEXT: [[L_ADDR:%.*]] = alloca %class.anon*, align 8 846 // CHECK2-NEXT: [[TMP:%.*]] = alloca %class.anon*, align 8 847 // CHECK2-NEXT: [[L1:%.*]] = alloca [[CLASS_ANON:%.*]], align 8 848 // CHECK2-NEXT: [[_TMP2:%.*]] = alloca %class.anon*, align 8 849 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 850 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 851 // CHECK2-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 852 // CHECK2-NEXT: store %class.anon* [[L]], %class.anon** [[L_ADDR]], align 8 853 // CHECK2-NEXT: [[TMP0:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 854 // CHECK2-NEXT: [[TMP1:%.*]] = load %class.anon*, %class.anon** [[L_ADDR]], align 8 855 // CHECK2-NEXT: store %class.anon* [[TMP1]], %class.anon** [[TMP]], align 8 856 // CHECK2-NEXT: [[TMP2:%.*]] = load %class.anon*, %class.anon** [[TMP]], align 8 857 // CHECK2-NEXT: [[TMP3:%.*]] = bitcast %class.anon* [[L1]] to i8* 858 // CHECK2-NEXT: [[TMP4:%.*]] = bitcast %class.anon* [[TMP2]] to i8* 859 // CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP3]], i8* align 8 [[TMP4]], i64 8, i1 false) 860 // CHECK2-NEXT: store %class.anon* [[L1]], %class.anon** [[_TMP2]], align 8 861 // CHECK2-NEXT: [[TMP5:%.*]] = load %class.anon*, %class.anon** [[_TMP2]], align 8 862 // CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP5]], i32 0, i32 0 863 // CHECK2-NEXT: store %struct.S* [[TMP0]], %struct.S** [[TMP6]], align 8 864 // CHECK2-NEXT: [[TMP7:%.*]] = load %class.anon*, %class.anon** [[_TMP2]], align 8 865 // CHECK2-NEXT: [[CALL:%.*]] = call i32 @_ZZN1S3fooEvENKUlvE_clEv(%class.anon* nonnull align 8 dereferenceable(8) [[TMP7]]) #[[ATTR6]] 866 // CHECK2-NEXT: ret void 867 // 868 // 869 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l41 870 // CHECK2-SAME: (i64 [[ARGC:%.*]], i32* nonnull align 4 dereferenceable(4) [[B:%.*]], i32* nonnull align 4 dereferenceable(4) [[C:%.*]], i32* [[D:%.*]], i32* nonnull align 
4 dereferenceable(4) [[A:%.*]], %class.anon.0* nonnull align 8 dereferenceable(40) [[L:%.*]]) #[[ATTR0]] { 871 // CHECK2-NEXT: entry: 872 // CHECK2-NEXT: [[ARGC_ADDR:%.*]] = alloca i64, align 8 873 // CHECK2-NEXT: [[B_ADDR:%.*]] = alloca i32*, align 8 874 // CHECK2-NEXT: [[C_ADDR:%.*]] = alloca i32*, align 8 875 // CHECK2-NEXT: [[D_ADDR:%.*]] = alloca i32*, align 8 876 // CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8 877 // CHECK2-NEXT: [[L_ADDR:%.*]] = alloca %class.anon.0*, align 8 878 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32*, align 8 879 // CHECK2-NEXT: [[_TMP1:%.*]] = alloca i32*, align 8 880 // CHECK2-NEXT: [[_TMP2:%.*]] = alloca %class.anon.0*, align 8 881 // CHECK2-NEXT: [[L3:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8 882 // CHECK2-NEXT: [[_TMP4:%.*]] = alloca %class.anon.0*, align 8 883 // CHECK2-NEXT: [[B5:%.*]] = alloca i32, align 4 884 // CHECK2-NEXT: [[_TMP6:%.*]] = alloca i32*, align 8 885 // CHECK2-NEXT: [[C7:%.*]] = alloca i32, align 4 886 // CHECK2-NEXT: [[_TMP8:%.*]] = alloca i32*, align 8 887 // CHECK2-NEXT: store i64 [[ARGC]], i64* [[ARGC_ADDR]], align 8 888 // CHECK2-NEXT: store i32* [[B]], i32** [[B_ADDR]], align 8 889 // CHECK2-NEXT: store i32* [[C]], i32** [[C_ADDR]], align 8 890 // CHECK2-NEXT: store i32* [[D]], i32** [[D_ADDR]], align 8 891 // CHECK2-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8 892 // CHECK2-NEXT: store %class.anon.0* [[L]], %class.anon.0** [[L_ADDR]], align 8 893 // CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[ARGC_ADDR]] to i32* 894 // CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[B_ADDR]], align 8 895 // CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[C_ADDR]], align 8 896 // CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[A_ADDR]], align 8 897 // CHECK2-NEXT: [[TMP3:%.*]] = load %class.anon.0*, %class.anon.0** [[L_ADDR]], align 8 898 // CHECK2-NEXT: store i32* [[TMP0]], i32** [[TMP]], align 8 899 // CHECK2-NEXT: store i32* [[TMP1]], i32** [[_TMP1]], align 8 900 // CHECK2-NEXT: store %class.anon.0* [[TMP3]], %class.anon.0** [[_TMP2]], align 8 901 // CHECK2-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 true, i1 true) 902 // CHECK2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP4]], -1 903 // CHECK2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]] 904 // CHECK2: user_code.entry: 905 // CHECK2-NEXT: [[TMP5:%.*]] = load %class.anon.0*, %class.anon.0** [[_TMP2]], align 8 906 // CHECK2-NEXT: [[TMP6:%.*]] = bitcast %class.anon.0* [[L3]] to i8* 907 // CHECK2-NEXT: [[TMP7:%.*]] = bitcast %class.anon.0* [[TMP5]] to i8* 908 // CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP6]], i8* align 8 [[TMP7]], i64 40, i1 false) 909 // CHECK2-NEXT: store %class.anon.0* [[L3]], %class.anon.0** [[_TMP4]], align 8 910 // CHECK2-NEXT: [[TMP8:%.*]] = load i32*, i32** [[TMP]], align 8 911 // CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4 912 // CHECK2-NEXT: store i32 [[TMP9]], i32* [[B5]], align 4 913 // CHECK2-NEXT: store i32* [[B5]], i32** [[_TMP6]], align 8 914 // CHECK2-NEXT: [[TMP10:%.*]] = load i32*, i32** [[_TMP1]], align 8 915 // CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4 916 // CHECK2-NEXT: store i32 [[TMP11]], i32* [[C7]], align 4 917 // CHECK2-NEXT: store i32* [[C7]], i32** [[_TMP8]], align 8 918 // CHECK2-NEXT: [[TMP12:%.*]] = load %class.anon.0*, %class.anon.0** [[_TMP4]], align 8 919 // CHECK2-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[TMP12]], i32 0, i32 0 920 // CHECK2-NEXT: 
store i32* [[CONV]], i32** [[TMP13]], align 8 921 // CHECK2-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[TMP12]], i32 0, i32 1 922 // CHECK2-NEXT: [[TMP15:%.*]] = load i32*, i32** [[_TMP6]], align 8 923 // CHECK2-NEXT: store i32* [[TMP15]], i32** [[TMP14]], align 8 924 // CHECK2-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[TMP12]], i32 0, i32 2 925 // CHECK2-NEXT: [[TMP17:%.*]] = load i32*, i32** [[_TMP8]], align 8 926 // CHECK2-NEXT: store i32* [[TMP17]], i32** [[TMP16]], align 8 927 // CHECK2-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[TMP12]], i32 0, i32 3 928 // CHECK2-NEXT: store i32** [[D_ADDR]], i32*** [[TMP18]], align 8 929 // CHECK2-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[TMP12]], i32 0, i32 4 930 // CHECK2-NEXT: store i32* [[TMP2]], i32** [[TMP19]], align 8 931 // CHECK2-NEXT: [[TMP20:%.*]] = load %class.anon.0*, %class.anon.0** [[_TMP4]], align 8 932 // CHECK2-NEXT: [[CALL:%.*]] = call i64 @"_ZZ4mainENK3$_0clEv"(%class.anon.0* nonnull align 8 dereferenceable(40) [[TMP20]]) #[[ATTR6]] 933 // CHECK2-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true) 934 // CHECK2-NEXT: ret void 935 // CHECK2: worker.exit: 936 // CHECK2-NEXT: ret void 937 // 938 // 939 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l43 940 // CHECK2-SAME: (i32* nonnull align 4 dereferenceable(4) [[ARGC:%.*]], i32* nonnull align 4 dereferenceable(4) [[B:%.*]], i32* nonnull align 4 dereferenceable(4) [[C:%.*]], i32* [[D:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], %class.anon.0* nonnull align 8 dereferenceable(40) [[L:%.*]]) #[[ATTR3]] { 941 // CHECK2-NEXT: entry: 942 // CHECK2-NEXT: [[ARGC_ADDR:%.*]] = alloca i32*, align 8 943 // CHECK2-NEXT: [[B_ADDR:%.*]] = alloca i32*, align 8 944 // CHECK2-NEXT: [[C_ADDR:%.*]] = alloca i32*, align 8 945 // CHECK2-NEXT: [[D_ADDR:%.*]] = alloca i32*, align 8 946 // CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8 947 // CHECK2-NEXT: [[L_ADDR:%.*]] = alloca %class.anon.0*, align 8 948 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32*, align 8 949 // CHECK2-NEXT: [[_TMP1:%.*]] = alloca i32*, align 8 950 // CHECK2-NEXT: [[_TMP2:%.*]] = alloca %class.anon.0*, align 8 951 // CHECK2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [6 x i8*], align 8 952 // CHECK2-NEXT: store i32* [[ARGC]], i32** [[ARGC_ADDR]], align 8 953 // CHECK2-NEXT: store i32* [[B]], i32** [[B_ADDR]], align 8 954 // CHECK2-NEXT: store i32* [[C]], i32** [[C_ADDR]], align 8 955 // CHECK2-NEXT: store i32* [[D]], i32** [[D_ADDR]], align 8 956 // CHECK2-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8 957 // CHECK2-NEXT: store %class.anon.0* [[L]], %class.anon.0** [[L_ADDR]], align 8 958 // CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[ARGC_ADDR]], align 8 959 // CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[B_ADDR]], align 8 960 // CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[C_ADDR]], align 8 961 // CHECK2-NEXT: [[TMP3:%.*]] = load i32*, i32** [[A_ADDR]], align 8 962 // CHECK2-NEXT: [[TMP4:%.*]] = load %class.anon.0*, %class.anon.0** [[L_ADDR]], align 8 963 // CHECK2-NEXT: store i32* [[TMP1]], i32** [[TMP]], align 8 964 // CHECK2-NEXT: store i32* [[TMP2]], i32** [[_TMP1]], align 8 965 // CHECK2-NEXT: store %class.anon.0* [[TMP4]], %class.anon.0** [[_TMP2]], align 8 966 // CHECK2-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 2, i1 false, i1 true) 967 // CHECK2-NEXT: 
[[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP5]], -1 968 // CHECK2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]] 969 // CHECK2: user_code.entry: 970 // CHECK2-NEXT: [[TMP6:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]]) 971 // CHECK2-NEXT: [[TMP7:%.*]] = load i32*, i32** [[TMP]], align 8 972 // CHECK2-NEXT: [[TMP8:%.*]] = load i32*, i32** [[_TMP1]], align 8 973 // CHECK2-NEXT: [[TMP9:%.*]] = load i32*, i32** [[D_ADDR]], align 8 974 // CHECK2-NEXT: [[TMP10:%.*]] = load %class.anon.0*, %class.anon.0** [[_TMP2]], align 8 975 // CHECK2-NEXT: [[TMP11:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0 976 // CHECK2-NEXT: [[TMP12:%.*]] = bitcast i32* [[TMP0]] to i8* 977 // CHECK2-NEXT: store i8* [[TMP12]], i8** [[TMP11]], align 8 978 // CHECK2-NEXT: [[TMP13:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1 979 // CHECK2-NEXT: [[TMP14:%.*]] = bitcast i32* [[TMP7]] to i8* 980 // CHECK2-NEXT: store i8* [[TMP14]], i8** [[TMP13]], align 8 981 // CHECK2-NEXT: [[TMP15:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 2 982 // CHECK2-NEXT: [[TMP16:%.*]] = bitcast i32* [[TMP8]] to i8* 983 // CHECK2-NEXT: store i8* [[TMP16]], i8** [[TMP15]], align 8 984 // CHECK2-NEXT: [[TMP17:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 3 985 // CHECK2-NEXT: [[TMP18:%.*]] = bitcast i32* [[TMP9]] to i8* 986 // CHECK2-NEXT: store i8* [[TMP18]], i8** [[TMP17]], align 8 987 // CHECK2-NEXT: [[TMP19:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 4 988 // CHECK2-NEXT: [[TMP20:%.*]] = bitcast i32* [[TMP3]] to i8* 989 // CHECK2-NEXT: store i8* [[TMP20]], i8** [[TMP19]], align 8 990 // CHECK2-NEXT: [[TMP21:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 5 991 // CHECK2-NEXT: [[TMP22:%.*]] = bitcast %class.anon.0* [[TMP10]] to i8* 992 // CHECK2-NEXT: store i8* [[TMP22]], i8** [[TMP21]], align 8 993 // CHECK2-NEXT: [[TMP23:%.*]] = bitcast [6 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8** 994 // CHECK2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32*, i32*, i32*, i32*, i32*, %class.anon.0*)* @__omp_outlined__1 to i8*), i8* null, i8** [[TMP23]], i64 6) 995 // CHECK2-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 2, i1 true) 996 // CHECK2-NEXT: ret void 997 // CHECK2: worker.exit: 998 // CHECK2-NEXT: ret void 999 // 1000 // 1001 // CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__1 1002 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[ARGC:%.*]], i32* nonnull align 4 dereferenceable(4) [[B:%.*]], i32* nonnull align 4 dereferenceable(4) [[C:%.*]], i32* [[D:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], %class.anon.0* nonnull align 8 dereferenceable(40) [[L:%.*]]) #[[ATTR0]] { 1003 // CHECK2-NEXT: entry: 1004 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 1005 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 1006 // CHECK2-NEXT: [[ARGC_ADDR:%.*]] = alloca i32*, align 8 1007 // CHECK2-NEXT: [[B_ADDR:%.*]] = alloca i32*, align 8 1008 // CHECK2-NEXT: [[C_ADDR:%.*]] = alloca i32*, align 8 1009 // CHECK2-NEXT: [[D_ADDR:%.*]] = alloca i32*, align 8 1010 // CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8 
1011 // CHECK2-NEXT: [[L_ADDR:%.*]] = alloca %class.anon.0*, align 8 1012 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32*, align 8 1013 // CHECK2-NEXT: [[_TMP1:%.*]] = alloca i32*, align 8 1014 // CHECK2-NEXT: [[_TMP2:%.*]] = alloca %class.anon.0*, align 8 1015 // CHECK2-NEXT: [[L3:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8 1016 // CHECK2-NEXT: [[_TMP4:%.*]] = alloca %class.anon.0*, align 8 1017 // CHECK2-NEXT: [[ARGC5:%.*]] = alloca i32, align 4 1018 // CHECK2-NEXT: [[B6:%.*]] = alloca i32, align 4 1019 // CHECK2-NEXT: [[_TMP7:%.*]] = alloca i32*, align 8 1020 // CHECK2-NEXT: [[C8:%.*]] = alloca i32, align 4 1021 // CHECK2-NEXT: [[_TMP9:%.*]] = alloca i32*, align 8 1022 // CHECK2-NEXT: [[A10:%.*]] = alloca i32, align 4 1023 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 1024 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 1025 // CHECK2-NEXT: store i32* [[ARGC]], i32** [[ARGC_ADDR]], align 8 1026 // CHECK2-NEXT: store i32* [[B]], i32** [[B_ADDR]], align 8 1027 // CHECK2-NEXT: store i32* [[C]], i32** [[C_ADDR]], align 8 1028 // CHECK2-NEXT: store i32* [[D]], i32** [[D_ADDR]], align 8 1029 // CHECK2-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8 1030 // CHECK2-NEXT: store %class.anon.0* [[L]], %class.anon.0** [[L_ADDR]], align 8 1031 // CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[ARGC_ADDR]], align 8 1032 // CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[B_ADDR]], align 8 1033 // CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[C_ADDR]], align 8 1034 // CHECK2-NEXT: [[TMP3:%.*]] = load i32*, i32** [[A_ADDR]], align 8 1035 // CHECK2-NEXT: [[TMP4:%.*]] = load %class.anon.0*, %class.anon.0** [[L_ADDR]], align 8 1036 // CHECK2-NEXT: store i32* [[TMP1]], i32** [[TMP]], align 8 1037 // CHECK2-NEXT: store i32* [[TMP2]], i32** [[_TMP1]], align 8 1038 // CHECK2-NEXT: store %class.anon.0* [[TMP4]], %class.anon.0** [[_TMP2]], align 8 1039 // CHECK2-NEXT: [[TMP5:%.*]] = load %class.anon.0*, %class.anon.0** [[_TMP2]], align 8 1040 // CHECK2-NEXT: [[TMP6:%.*]] = bitcast %class.anon.0* [[L3]] to i8* 1041 // CHECK2-NEXT: [[TMP7:%.*]] = bitcast %class.anon.0* [[TMP5]] to i8* 1042 // CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP6]], i8* align 8 [[TMP7]], i64 40, i1 false) 1043 // CHECK2-NEXT: store %class.anon.0* [[L3]], %class.anon.0** [[_TMP4]], align 8 1044 // CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP0]], align 4 1045 // CHECK2-NEXT: store i32 [[TMP8]], i32* [[ARGC5]], align 4 1046 // CHECK2-NEXT: [[TMP9:%.*]] = load i32*, i32** [[TMP]], align 8 1047 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4 1048 // CHECK2-NEXT: store i32 [[TMP10]], i32* [[B6]], align 4 1049 // CHECK2-NEXT: store i32* [[B6]], i32** [[_TMP7]], align 8 1050 // CHECK2-NEXT: [[TMP11:%.*]] = load i32*, i32** [[_TMP1]], align 8 1051 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4 1052 // CHECK2-NEXT: store i32 [[TMP12]], i32* [[C8]], align 4 1053 // CHECK2-NEXT: store i32* [[C8]], i32** [[_TMP9]], align 8 1054 // CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP3]], align 4 1055 // CHECK2-NEXT: store i32 [[TMP13]], i32* [[A10]], align 4 1056 // CHECK2-NEXT: [[TMP14:%.*]] = load %class.anon.0*, %class.anon.0** [[_TMP4]], align 8 1057 // CHECK2-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[TMP14]], i32 0, i32 0 1058 // CHECK2-NEXT: store i32* [[ARGC5]], i32** [[TMP15]], align 8 1059 // CHECK2-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* 
[[TMP14]], i32 0, i32 1 1060 // CHECK2-NEXT: [[TMP17:%.*]] = load i32*, i32** [[_TMP7]], align 8 1061 // CHECK2-NEXT: store i32* [[TMP17]], i32** [[TMP16]], align 8 1062 // CHECK2-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[TMP14]], i32 0, i32 2 1063 // CHECK2-NEXT: [[TMP19:%.*]] = load i32*, i32** [[_TMP9]], align 8 1064 // CHECK2-NEXT: store i32* [[TMP19]], i32** [[TMP18]], align 8 1065 // CHECK2-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[TMP14]], i32 0, i32 3 1066 // CHECK2-NEXT: store i32** [[D_ADDR]], i32*** [[TMP20]], align 8 1067 // CHECK2-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[TMP14]], i32 0, i32 4 1068 // CHECK2-NEXT: store i32* [[A10]], i32** [[TMP21]], align 8 1069 // CHECK2-NEXT: [[TMP22:%.*]] = load %class.anon.0*, %class.anon.0** [[_TMP4]], align 8 1070 // CHECK2-NEXT: [[CALL:%.*]] = call i64 @"_ZZ4mainENK3$_0clEv"(%class.anon.0* nonnull align 8 dereferenceable(40) [[TMP22]]) #[[ATTR6]] 1071 // CHECK2-NEXT: ret void 1072 // 1073 // 1074 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooIZN1S3fooEvEUlvE_EiRKT__l18 1075 // CHECK2-SAME: (%class.anon* nonnull align 8 dereferenceable(8) [[T:%.*]]) #[[ATTR3]] { 1076 // CHECK2-NEXT: entry: 1077 // CHECK2-NEXT: [[T_ADDR:%.*]] = alloca %class.anon*, align 8 1078 // CHECK2-NEXT: [[TMP:%.*]] = alloca %class.anon*, align 8 1079 // CHECK2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x i8*], align 8 1080 // CHECK2-NEXT: store %class.anon* [[T]], %class.anon** [[T_ADDR]], align 8 1081 // CHECK2-NEXT: [[TMP0:%.*]] = load %class.anon*, %class.anon** [[T_ADDR]], align 8 1082 // CHECK2-NEXT: store %class.anon* [[TMP0]], %class.anon** [[TMP]], align 8 1083 // CHECK2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 2, i1 false, i1 true) 1084 // CHECK2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1 1085 // CHECK2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]] 1086 // CHECK2: user_code.entry: 1087 // CHECK2-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]]) 1088 // CHECK2-NEXT: [[TMP3:%.*]] = load %class.anon*, %class.anon** [[TMP]], align 8 1089 // CHECK2-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0 1090 // CHECK2-NEXT: [[TMP5:%.*]] = bitcast %class.anon* [[TMP3]] to i8* 1091 // CHECK2-NEXT: store i8* [[TMP5]], i8** [[TMP4]], align 8 1092 // CHECK2-NEXT: [[TMP6:%.*]] = bitcast [1 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8** 1093 // CHECK2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, %class.anon*)* @__omp_outlined__2 to i8*), i8* null, i8** [[TMP6]], i64 1) 1094 // CHECK2-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 2, i1 true) 1095 // CHECK2-NEXT: ret void 1096 // CHECK2: worker.exit: 1097 // CHECK2-NEXT: ret void 1098 // 1099 // 1100 // CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__2 1101 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %class.anon* nonnull align 8 dereferenceable(8) [[T:%.*]]) #[[ATTR0]] { 1102 // CHECK2-NEXT: entry: 1103 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 1104 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 1105 // CHECK2-NEXT: [[T_ADDR:%.*]] = alloca %class.anon*, align 8 1106 // CHECK2-NEXT: [[TMP:%.*]] = alloca 
%class.anon*, align 8 1107 // CHECK2-NEXT: [[T1:%.*]] = alloca [[CLASS_ANON:%.*]], align 8 1108 // CHECK2-NEXT: [[_TMP2:%.*]] = alloca %class.anon*, align 8 1109 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 1110 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 1111 // CHECK2-NEXT: store %class.anon* [[T]], %class.anon** [[T_ADDR]], align 8 1112 // CHECK2-NEXT: [[TMP0:%.*]] = load %class.anon*, %class.anon** [[T_ADDR]], align 8 1113 // CHECK2-NEXT: store %class.anon* [[TMP0]], %class.anon** [[TMP]], align 8 1114 // CHECK2-NEXT: [[TMP1:%.*]] = load %class.anon*, %class.anon** [[TMP]], align 8 1115 // CHECK2-NEXT: [[TMP2:%.*]] = bitcast %class.anon* [[T1]] to i8* 1116 // CHECK2-NEXT: [[TMP3:%.*]] = bitcast %class.anon* [[TMP1]] to i8* 1117 // CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP2]], i8* align 8 [[TMP3]], i64 8, i1 false) 1118 // CHECK2-NEXT: store %class.anon* [[T1]], %class.anon** [[_TMP2]], align 8 1119 // CHECK2-NEXT: [[TMP4:%.*]] = load %class.anon*, %class.anon** [[_TMP2]], align 8 1120 // CHECK2-NEXT: [[TMP5:%.*]] = load %class.anon*, %class.anon** [[_TMP2]], align 8 1121 // CHECK2-NEXT: [[CALL:%.*]] = call i32 @_ZZN1S3fooEvENKUlvE_clEv(%class.anon* nonnull align 8 dereferenceable(8) [[TMP5]]) #[[ATTR6]] 1122 // CHECK2-NEXT: ret void 1123 // 1124 // 1125 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l41 1126 // CHECK3-SAME: (i64 [[ARGC:%.*]], i32* nonnull align 4 dereferenceable(4) [[B:%.*]], i32* nonnull align 4 dereferenceable(4) [[C:%.*]], i32* [[D:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], %class.anon* nonnull align 8 dereferenceable(40) [[L:%.*]]) #[[ATTR0:[0-9]+]] { 1127 // CHECK3-NEXT: entry: 1128 // CHECK3-NEXT: [[ARGC_ADDR:%.*]] = alloca i64, align 8 1129 // CHECK3-NEXT: [[B_ADDR:%.*]] = alloca i32*, align 8 1130 // CHECK3-NEXT: [[C_ADDR:%.*]] = alloca i32*, align 8 1131 // CHECK3-NEXT: [[D_ADDR:%.*]] = alloca i32*, align 8 1132 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8 1133 // CHECK3-NEXT: [[L_ADDR:%.*]] = alloca %class.anon*, align 8 1134 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32*, align 8 1135 // CHECK3-NEXT: [[_TMP1:%.*]] = alloca i32*, align 8 1136 // CHECK3-NEXT: [[_TMP2:%.*]] = alloca %class.anon*, align 8 1137 // CHECK3-NEXT: [[L3:%.*]] = alloca [[CLASS_ANON:%.*]], align 8 1138 // CHECK3-NEXT: [[_TMP4:%.*]] = alloca %class.anon*, align 8 1139 // CHECK3-NEXT: [[B5:%.*]] = alloca i32, align 4 1140 // CHECK3-NEXT: [[_TMP6:%.*]] = alloca i32*, align 8 1141 // CHECK3-NEXT: [[C7:%.*]] = alloca i32, align 4 1142 // CHECK3-NEXT: [[_TMP8:%.*]] = alloca i32*, align 8 1143 // CHECK3-NEXT: store i64 [[ARGC]], i64* [[ARGC_ADDR]], align 8 1144 // CHECK3-NEXT: store i32* [[B]], i32** [[B_ADDR]], align 8 1145 // CHECK3-NEXT: store i32* [[C]], i32** [[C_ADDR]], align 8 1146 // CHECK3-NEXT: store i32* [[D]], i32** [[D_ADDR]], align 8 1147 // CHECK3-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8 1148 // CHECK3-NEXT: store %class.anon* [[L]], %class.anon** [[L_ADDR]], align 8 1149 // CHECK3-NEXT: [[CONV:%.*]] = bitcast i64* [[ARGC_ADDR]] to i32* 1150 // CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[B_ADDR]], align 8 1151 // CHECK3-NEXT: [[TMP1:%.*]] = load i32*, i32** [[C_ADDR]], align 8 1152 // CHECK3-NEXT: [[TMP2:%.*]] = load i32*, i32** [[A_ADDR]], align 8 1153 // CHECK3-NEXT: [[TMP3:%.*]] = load %class.anon*, %class.anon** [[L_ADDR]], align 8 1154 // CHECK3-NEXT: store i32* [[TMP0]], i32** [[TMP]], align 8 1155 // 
CHECK3-NEXT: store i32* [[TMP1]], i32** [[_TMP1]], align 8 1156 // CHECK3-NEXT: store %class.anon* [[TMP3]], %class.anon** [[_TMP2]], align 8 1157 // CHECK3-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1:[0-9]+]], i8 1, i1 true, i1 true) 1158 // CHECK3-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP4]], -1 1159 // CHECK3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]] 1160 // CHECK3: user_code.entry: 1161 // CHECK3-NEXT: [[TMP5:%.*]] = load %class.anon*, %class.anon** [[_TMP2]], align 8 1162 // CHECK3-NEXT: [[TMP6:%.*]] = bitcast %class.anon* [[L3]] to i8* 1163 // CHECK3-NEXT: [[TMP7:%.*]] = bitcast %class.anon* [[TMP5]] to i8* 1164 // CHECK3-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP6]], i8* align 8 [[TMP7]], i64 40, i1 false) 1165 // CHECK3-NEXT: store %class.anon* [[L3]], %class.anon** [[_TMP4]], align 8 1166 // CHECK3-NEXT: [[TMP8:%.*]] = load i32*, i32** [[TMP]], align 8 1167 // CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4 1168 // CHECK3-NEXT: store i32 [[TMP9]], i32* [[B5]], align 4 1169 // CHECK3-NEXT: store i32* [[B5]], i32** [[_TMP6]], align 8 1170 // CHECK3-NEXT: [[TMP10:%.*]] = load i32*, i32** [[_TMP1]], align 8 1171 // CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4 1172 // CHECK3-NEXT: store i32 [[TMP11]], i32* [[C7]], align 4 1173 // CHECK3-NEXT: store i32* [[C7]], i32** [[_TMP8]], align 8 1174 // CHECK3-NEXT: [[TMP12:%.*]] = load %class.anon*, %class.anon** [[_TMP4]], align 8 1175 // CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP12]], i32 0, i32 0 1176 // CHECK3-NEXT: store i32* [[CONV]], i32** [[TMP13]], align 8 1177 // CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP12]], i32 0, i32 1 1178 // CHECK3-NEXT: [[TMP15:%.*]] = load i32*, i32** [[_TMP6]], align 8 1179 // CHECK3-NEXT: store i32* [[TMP15]], i32** [[TMP14]], align 8 1180 // CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP12]], i32 0, i32 2 1181 // CHECK3-NEXT: [[TMP17:%.*]] = load i32*, i32** [[_TMP8]], align 8 1182 // CHECK3-NEXT: store i32* [[TMP17]], i32** [[TMP16]], align 8 1183 // CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP12]], i32 0, i32 3 1184 // CHECK3-NEXT: store i32** [[D_ADDR]], i32*** [[TMP18]], align 8 1185 // CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP12]], i32 0, i32 4 1186 // CHECK3-NEXT: store i32* [[TMP2]], i32** [[TMP19]], align 8 1187 // CHECK3-NEXT: [[TMP20:%.*]] = load %class.anon*, %class.anon** [[_TMP4]], align 8 1188 // CHECK3-NEXT: [[CALL:%.*]] = call i64 @"_ZZ4mainENK3$_0clEv"(%class.anon* nonnull align 8 dereferenceable(40) [[TMP20]]) #[[ATTR6:[0-9]+]] 1189 // CHECK3-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true) 1190 // CHECK3-NEXT: ret void 1191 // CHECK3: worker.exit: 1192 // CHECK3-NEXT: ret void 1193 // 1194 // 1195 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l43 1196 // CHECK3-SAME: (i32* nonnull align 4 dereferenceable(4) [[ARGC:%.*]], i32* nonnull align 4 dereferenceable(4) [[B:%.*]], i32* nonnull align 4 dereferenceable(4) [[C:%.*]], i32* [[D:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], %class.anon* nonnull align 8 dereferenceable(40) [[L:%.*]]) #[[ATTR3:[0-9]+]] { 1197 // CHECK3-NEXT: entry: 1198 // CHECK3-NEXT: [[ARGC_ADDR:%.*]] = alloca i32*, align 8 1199 // CHECK3-NEXT: 
[[B_ADDR:%.*]] = alloca i32*, align 8 1200 // CHECK3-NEXT: [[C_ADDR:%.*]] = alloca i32*, align 8 1201 // CHECK3-NEXT: [[D_ADDR:%.*]] = alloca i32*, align 8 1202 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8 1203 // CHECK3-NEXT: [[L_ADDR:%.*]] = alloca %class.anon*, align 8 1204 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32*, align 8 1205 // CHECK3-NEXT: [[_TMP1:%.*]] = alloca i32*, align 8 1206 // CHECK3-NEXT: [[_TMP2:%.*]] = alloca %class.anon*, align 8 1207 // CHECK3-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [6 x i8*], align 8 1208 // CHECK3-NEXT: store i32* [[ARGC]], i32** [[ARGC_ADDR]], align 8 1209 // CHECK3-NEXT: store i32* [[B]], i32** [[B_ADDR]], align 8 1210 // CHECK3-NEXT: store i32* [[C]], i32** [[C_ADDR]], align 8 1211 // CHECK3-NEXT: store i32* [[D]], i32** [[D_ADDR]], align 8 1212 // CHECK3-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8 1213 // CHECK3-NEXT: store %class.anon* [[L]], %class.anon** [[L_ADDR]], align 8 1214 // CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[ARGC_ADDR]], align 8 1215 // CHECK3-NEXT: [[TMP1:%.*]] = load i32*, i32** [[B_ADDR]], align 8 1216 // CHECK3-NEXT: [[TMP2:%.*]] = load i32*, i32** [[C_ADDR]], align 8 1217 // CHECK3-NEXT: [[TMP3:%.*]] = load i32*, i32** [[A_ADDR]], align 8 1218 // CHECK3-NEXT: [[TMP4:%.*]] = load %class.anon*, %class.anon** [[L_ADDR]], align 8 1219 // CHECK3-NEXT: store i32* [[TMP1]], i32** [[TMP]], align 8 1220 // CHECK3-NEXT: store i32* [[TMP2]], i32** [[_TMP1]], align 8 1221 // CHECK3-NEXT: store %class.anon* [[TMP4]], %class.anon** [[_TMP2]], align 8 1222 // CHECK3-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 2, i1 false, i1 true) 1223 // CHECK3-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP5]], -1 1224 // CHECK3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]] 1225 // CHECK3: user_code.entry: 1226 // CHECK3-NEXT: [[TMP6:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) 1227 // CHECK3-NEXT: [[TMP7:%.*]] = load i32*, i32** [[TMP]], align 8 1228 // CHECK3-NEXT: [[TMP8:%.*]] = load i32*, i32** [[_TMP1]], align 8 1229 // CHECK3-NEXT: [[TMP9:%.*]] = load i32*, i32** [[D_ADDR]], align 8 1230 // CHECK3-NEXT: [[TMP10:%.*]] = load %class.anon*, %class.anon** [[_TMP2]], align 8 1231 // CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0 1232 // CHECK3-NEXT: [[TMP12:%.*]] = bitcast i32* [[TMP0]] to i8* 1233 // CHECK3-NEXT: store i8* [[TMP12]], i8** [[TMP11]], align 8 1234 // CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1 1235 // CHECK3-NEXT: [[TMP14:%.*]] = bitcast i32* [[TMP7]] to i8* 1236 // CHECK3-NEXT: store i8* [[TMP14]], i8** [[TMP13]], align 8 1237 // CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 2 1238 // CHECK3-NEXT: [[TMP16:%.*]] = bitcast i32* [[TMP8]] to i8* 1239 // CHECK3-NEXT: store i8* [[TMP16]], i8** [[TMP15]], align 8 1240 // CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 3 1241 // CHECK3-NEXT: [[TMP18:%.*]] = bitcast i32* [[TMP9]] to i8* 1242 // CHECK3-NEXT: store i8* [[TMP18]], i8** [[TMP17]], align 8 1243 // CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 4 1244 // CHECK3-NEXT: [[TMP20:%.*]] = bitcast i32* [[TMP3]] to i8* 1245 // CHECK3-NEXT: store i8* [[TMP20]], i8** [[TMP19]], align 8 1246 
// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 5 1247 // CHECK3-NEXT: [[TMP22:%.*]] = bitcast %class.anon* [[TMP10]] to i8* 1248 // CHECK3-NEXT: store i8* [[TMP22]], i8** [[TMP21]], align 8 1249 // CHECK3-NEXT: [[TMP23:%.*]] = bitcast [6 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8** 1250 // CHECK3-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32*, i32*, i32*, i32*, i32*, %class.anon*)* @__omp_outlined__ to i8*), i8* null, i8** [[TMP23]], i64 6) 1251 // CHECK3-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 2, i1 true) 1252 // CHECK3-NEXT: ret void 1253 // CHECK3: worker.exit: 1254 // CHECK3-NEXT: ret void 1255 // 1256 // 1257 // CHECK3-LABEL: define {{[^@]+}}@__omp_outlined__ 1258 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[ARGC:%.*]], i32* nonnull align 4 dereferenceable(4) [[B:%.*]], i32* nonnull align 4 dereferenceable(4) [[C:%.*]], i32* [[D:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], %class.anon* nonnull align 8 dereferenceable(40) [[L:%.*]]) #[[ATTR0]] { 1259 // CHECK3-NEXT: entry: 1260 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 1261 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 1262 // CHECK3-NEXT: [[ARGC_ADDR:%.*]] = alloca i32*, align 8 1263 // CHECK3-NEXT: [[B_ADDR:%.*]] = alloca i32*, align 8 1264 // CHECK3-NEXT: [[C_ADDR:%.*]] = alloca i32*, align 8 1265 // CHECK3-NEXT: [[D_ADDR:%.*]] = alloca i32*, align 8 1266 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8 1267 // CHECK3-NEXT: [[L_ADDR:%.*]] = alloca %class.anon*, align 8 1268 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32*, align 8 1269 // CHECK3-NEXT: [[_TMP1:%.*]] = alloca i32*, align 8 1270 // CHECK3-NEXT: [[_TMP2:%.*]] = alloca %class.anon*, align 8 1271 // CHECK3-NEXT: [[L3:%.*]] = alloca [[CLASS_ANON:%.*]], align 8 1272 // CHECK3-NEXT: [[_TMP4:%.*]] = alloca %class.anon*, align 8 1273 // CHECK3-NEXT: [[ARGC5:%.*]] = alloca i32, align 4 1274 // CHECK3-NEXT: [[B6:%.*]] = alloca i32, align 4 1275 // CHECK3-NEXT: [[_TMP7:%.*]] = alloca i32*, align 8 1276 // CHECK3-NEXT: [[C8:%.*]] = alloca i32, align 4 1277 // CHECK3-NEXT: [[_TMP9:%.*]] = alloca i32*, align 8 1278 // CHECK3-NEXT: [[A10:%.*]] = alloca i32, align 4 1279 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 1280 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 1281 // CHECK3-NEXT: store i32* [[ARGC]], i32** [[ARGC_ADDR]], align 8 1282 // CHECK3-NEXT: store i32* [[B]], i32** [[B_ADDR]], align 8 1283 // CHECK3-NEXT: store i32* [[C]], i32** [[C_ADDR]], align 8 1284 // CHECK3-NEXT: store i32* [[D]], i32** [[D_ADDR]], align 8 1285 // CHECK3-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8 1286 // CHECK3-NEXT: store %class.anon* [[L]], %class.anon** [[L_ADDR]], align 8 1287 // CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[ARGC_ADDR]], align 8 1288 // CHECK3-NEXT: [[TMP1:%.*]] = load i32*, i32** [[B_ADDR]], align 8 1289 // CHECK3-NEXT: [[TMP2:%.*]] = load i32*, i32** [[C_ADDR]], align 8 1290 // CHECK3-NEXT: [[TMP3:%.*]] = load i32*, i32** [[A_ADDR]], align 8 1291 // CHECK3-NEXT: [[TMP4:%.*]] = load %class.anon*, %class.anon** [[L_ADDR]], align 8 1292 // CHECK3-NEXT: store i32* [[TMP1]], i32** [[TMP]], align 8 1293 // CHECK3-NEXT: store i32* [[TMP2]], i32** [[_TMP1]], align 8 1294 // 
CHECK3-NEXT: store %class.anon* [[TMP4]], %class.anon** [[_TMP2]], align 8 1295 // CHECK3-NEXT: [[TMP5:%.*]] = load %class.anon*, %class.anon** [[_TMP2]], align 8 1296 // CHECK3-NEXT: [[TMP6:%.*]] = bitcast %class.anon* [[L3]] to i8* 1297 // CHECK3-NEXT: [[TMP7:%.*]] = bitcast %class.anon* [[TMP5]] to i8* 1298 // CHECK3-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP6]], i8* align 8 [[TMP7]], i64 40, i1 false) 1299 // CHECK3-NEXT: store %class.anon* [[L3]], %class.anon** [[_TMP4]], align 8 1300 // CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP0]], align 4 1301 // CHECK3-NEXT: store i32 [[TMP8]], i32* [[ARGC5]], align 4 1302 // CHECK3-NEXT: [[TMP9:%.*]] = load i32*, i32** [[TMP]], align 8 1303 // CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4 1304 // CHECK3-NEXT: store i32 [[TMP10]], i32* [[B6]], align 4 1305 // CHECK3-NEXT: store i32* [[B6]], i32** [[_TMP7]], align 8 1306 // CHECK3-NEXT: [[TMP11:%.*]] = load i32*, i32** [[_TMP1]], align 8 1307 // CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4 1308 // CHECK3-NEXT: store i32 [[TMP12]], i32* [[C8]], align 4 1309 // CHECK3-NEXT: store i32* [[C8]], i32** [[_TMP9]], align 8 1310 // CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP3]], align 4 1311 // CHECK3-NEXT: store i32 [[TMP13]], i32* [[A10]], align 4 1312 // CHECK3-NEXT: [[TMP14:%.*]] = load %class.anon*, %class.anon** [[_TMP4]], align 8 1313 // CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP14]], i32 0, i32 0 1314 // CHECK3-NEXT: store i32* [[ARGC5]], i32** [[TMP15]], align 8 1315 // CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP14]], i32 0, i32 1 1316 // CHECK3-NEXT: [[TMP17:%.*]] = load i32*, i32** [[_TMP7]], align 8 1317 // CHECK3-NEXT: store i32* [[TMP17]], i32** [[TMP16]], align 8 1318 // CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP14]], i32 0, i32 2 1319 // CHECK3-NEXT: [[TMP19:%.*]] = load i32*, i32** [[_TMP9]], align 8 1320 // CHECK3-NEXT: store i32* [[TMP19]], i32** [[TMP18]], align 8 1321 // CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP14]], i32 0, i32 3 1322 // CHECK3-NEXT: store i32** [[D_ADDR]], i32*** [[TMP20]], align 8 1323 // CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP14]], i32 0, i32 4 1324 // CHECK3-NEXT: store i32* [[A10]], i32** [[TMP21]], align 8 1325 // CHECK3-NEXT: [[TMP22:%.*]] = load %class.anon*, %class.anon** [[_TMP4]], align 8 1326 // CHECK3-NEXT: [[CALL:%.*]] = call i64 @"_ZZ4mainENK3$_0clEv"(%class.anon* nonnull align 8 dereferenceable(40) [[TMP22]]) #[[ATTR6]] 1327 // CHECK3-NEXT: ret void 1328 // 1329 // 1330 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN1S3fooEv_l27 1331 // CHECK3-SAME: (%struct.S* [[THIS:%.*]], %class.anon.0* nonnull align 8 dereferenceable(8) [[L:%.*]]) #[[ATTR0]] { 1332 // CHECK3-NEXT: entry: 1333 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 1334 // CHECK3-NEXT: [[L_ADDR:%.*]] = alloca %class.anon.0*, align 8 1335 // CHECK3-NEXT: [[TMP:%.*]] = alloca %class.anon.0*, align 8 1336 // CHECK3-NEXT: [[L1:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8 1337 // CHECK3-NEXT: [[_TMP2:%.*]] = alloca %class.anon.0*, align 8 1338 // CHECK3-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 1339 // CHECK3-NEXT: store %class.anon.0* [[L]], %class.anon.0** [[L_ADDR]], align 8 1340 // CHECK3-NEXT: [[TMP0:%.*]] = load %struct.S*, %struct.S** 
[[THIS_ADDR]], align 8 1341 // CHECK3-NEXT: [[TMP1:%.*]] = load %class.anon.0*, %class.anon.0** [[L_ADDR]], align 8 1342 // CHECK3-NEXT: store %class.anon.0* [[TMP1]], %class.anon.0** [[TMP]], align 8 1343 // CHECK3-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 true, i1 true) 1344 // CHECK3-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1 1345 // CHECK3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]] 1346 // CHECK3: user_code.entry: 1347 // CHECK3-NEXT: [[TMP3:%.*]] = load %class.anon.0*, %class.anon.0** [[TMP]], align 8 1348 // CHECK3-NEXT: [[TMP4:%.*]] = bitcast %class.anon.0* [[L1]] to i8* 1349 // CHECK3-NEXT: [[TMP5:%.*]] = bitcast %class.anon.0* [[TMP3]] to i8* 1350 // CHECK3-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP4]], i8* align 8 [[TMP5]], i64 8, i1 false) 1351 // CHECK3-NEXT: store %class.anon.0* [[L1]], %class.anon.0** [[_TMP2]], align 8 1352 // CHECK3-NEXT: [[TMP6:%.*]] = load %class.anon.0*, %class.anon.0** [[_TMP2]], align 8 1353 // CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[TMP6]], i32 0, i32 0 1354 // CHECK3-NEXT: store %struct.S* [[TMP0]], %struct.S** [[TMP7]], align 8 1355 // CHECK3-NEXT: [[TMP8:%.*]] = load %class.anon.0*, %class.anon.0** [[_TMP2]], align 8 1356 // CHECK3-NEXT: [[CALL:%.*]] = call i32 @_ZZN1S3fooEvENKUlvE_clEv(%class.anon.0* nonnull align 8 dereferenceable(8) [[TMP8]]) #[[ATTR6]] 1357 // CHECK3-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true) 1358 // CHECK3-NEXT: ret void 1359 // CHECK3: worker.exit: 1360 // CHECK3-NEXT: ret void 1361 // 1362 // 1363 // CHECK3-LABEL: define {{[^@]+}}@_ZZN1S3fooEvENKUlvE_clEv 1364 // CHECK3-SAME: (%class.anon.0* nonnull align 8 dereferenceable(8) [[THIS:%.*]]) #[[ATTR2:[0-9]+]] comdat align 2 { 1365 // CHECK3-NEXT: entry: 1366 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %class.anon.0*, align 8 1367 // CHECK3-NEXT: store %class.anon.0* [[THIS]], %class.anon.0** [[THIS_ADDR]], align 8 1368 // CHECK3-NEXT: [[THIS1:%.*]] = load %class.anon.0*, %class.anon.0** [[THIS_ADDR]], align 8 1369 // CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON_0:%.*]], %class.anon.0* [[THIS1]], i32 0, i32 0 1370 // CHECK3-NEXT: [[TMP1:%.*]] = load %struct.S*, %struct.S** [[TMP0]], align 8 1371 // CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[TMP1]], i32 0, i32 0 1372 // CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[A]], align 4 1373 // CHECK3-NEXT: ret i32 [[TMP2]] 1374 // 1375 // 1376 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN1S3fooEv_l29 1377 // CHECK3-SAME: (%struct.S* [[THIS:%.*]], %class.anon.0* nonnull align 8 dereferenceable(8) [[L:%.*]]) #[[ATTR3]] { 1378 // CHECK3-NEXT: entry: 1379 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 1380 // CHECK3-NEXT: [[L_ADDR:%.*]] = alloca %class.anon.0*, align 8 1381 // CHECK3-NEXT: [[TMP:%.*]] = alloca %class.anon.0*, align 8 1382 // CHECK3-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [2 x i8*], align 8 1383 // CHECK3-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 1384 // CHECK3-NEXT: store %class.anon.0* [[L]], %class.anon.0** [[L_ADDR]], align 8 1385 // CHECK3-NEXT: [[TMP0:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 1386 // CHECK3-NEXT: [[TMP1:%.*]] = load %class.anon.0*, %class.anon.0** [[L_ADDR]], align 8 1387 // CHECK3-NEXT: store %class.anon.0* [[TMP1]], %class.anon.0** [[TMP]], align 8 
1388 // CHECK3-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 2, i1 false, i1 true) 1389 // CHECK3-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1 1390 // CHECK3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]] 1391 // CHECK3: user_code.entry: 1392 // CHECK3-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]]) 1393 // CHECK3-NEXT: [[TMP4:%.*]] = load %class.anon.0*, %class.anon.0** [[TMP]], align 8 1394 // CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0 1395 // CHECK3-NEXT: [[TMP6:%.*]] = bitcast %struct.S* [[TMP0]] to i8* 1396 // CHECK3-NEXT: store i8* [[TMP6]], i8** [[TMP5]], align 8 1397 // CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1 1398 // CHECK3-NEXT: [[TMP8:%.*]] = bitcast %class.anon.0* [[TMP4]] to i8* 1399 // CHECK3-NEXT: store i8* [[TMP8]], i8** [[TMP7]], align 8 1400 // CHECK3-NEXT: [[TMP9:%.*]] = bitcast [2 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8** 1401 // CHECK3-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, %struct.S*, %class.anon.0*)* @__omp_outlined__1 to i8*), i8* null, i8** [[TMP9]], i64 2) 1402 // CHECK3-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 2, i1 true) 1403 // CHECK3-NEXT: ret void 1404 // CHECK3: worker.exit: 1405 // CHECK3-NEXT: ret void 1406 // 1407 // 1408 // CHECK3-LABEL: define {{[^@]+}}@__omp_outlined__1 1409 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S* [[THIS:%.*]], %class.anon.0* nonnull align 8 dereferenceable(8) [[L:%.*]]) #[[ATTR0]] { 1410 // CHECK3-NEXT: entry: 1411 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 1412 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 1413 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 1414 // CHECK3-NEXT: [[L_ADDR:%.*]] = alloca %class.anon.0*, align 8 1415 // CHECK3-NEXT: [[TMP:%.*]] = alloca %class.anon.0*, align 8 1416 // CHECK3-NEXT: [[L1:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8 1417 // CHECK3-NEXT: [[_TMP2:%.*]] = alloca %class.anon.0*, align 8 1418 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 1419 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 1420 // CHECK3-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 1421 // CHECK3-NEXT: store %class.anon.0* [[L]], %class.anon.0** [[L_ADDR]], align 8 1422 // CHECK3-NEXT: [[TMP0:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 1423 // CHECK3-NEXT: [[TMP1:%.*]] = load %class.anon.0*, %class.anon.0** [[L_ADDR]], align 8 1424 // CHECK3-NEXT: store %class.anon.0* [[TMP1]], %class.anon.0** [[TMP]], align 8 1425 // CHECK3-NEXT: [[TMP2:%.*]] = load %class.anon.0*, %class.anon.0** [[TMP]], align 8 1426 // CHECK3-NEXT: [[TMP3:%.*]] = bitcast %class.anon.0* [[L1]] to i8* 1427 // CHECK3-NEXT: [[TMP4:%.*]] = bitcast %class.anon.0* [[TMP2]] to i8* 1428 // CHECK3-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP3]], i8* align 8 [[TMP4]], i64 8, i1 false) 1429 // CHECK3-NEXT: store %class.anon.0* [[L1]], %class.anon.0** [[_TMP2]], align 8 1430 // CHECK3-NEXT: [[TMP5:%.*]] = load %class.anon.0*, %class.anon.0** [[_TMP2]], align 8 1431 // CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], 
%class.anon.0* [[TMP5]], i32 0, i32 0
// CHECK3-NEXT: store %struct.S* [[TMP0]], %struct.S** [[TMP6]], align 8
// CHECK3-NEXT: [[TMP7:%.*]] = load %class.anon.0*, %class.anon.0** [[_TMP2]], align 8
// CHECK3-NEXT: [[CALL:%.*]] = call i32 @_ZZN1S3fooEvENKUlvE_clEv(%class.anon.0* nonnull align 8 dereferenceable(8) [[TMP7]]) #[[ATTR6]]
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooIZN1S3fooEvEUlvE_EiRKT__l18
// CHECK3-SAME: (%class.anon.0* nonnull align 8 dereferenceable(8) [[T:%.*]]) #[[ATTR3]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[T_ADDR:%.*]] = alloca %class.anon.0*, align 8
// CHECK3-NEXT: [[TMP:%.*]] = alloca %class.anon.0*, align 8
// CHECK3-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x i8*], align 8
// CHECK3-NEXT: store %class.anon.0* [[T]], %class.anon.0** [[T_ADDR]], align 8
// CHECK3-NEXT: [[TMP0:%.*]] = load %class.anon.0*, %class.anon.0** [[T_ADDR]], align 8
// CHECK3-NEXT: store %class.anon.0* [[TMP0]], %class.anon.0** [[TMP]], align 8
// CHECK3-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 2, i1 false, i1 true)
// CHECK3-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
// CHECK3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK3: user_code.entry:
// CHECK3-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
// CHECK3-NEXT: [[TMP3:%.*]] = load %class.anon.0*, %class.anon.0** [[TMP]], align 8
// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
// CHECK3-NEXT: [[TMP5:%.*]] = bitcast %class.anon.0* [[TMP3]] to i8*
// CHECK3-NEXT: store i8* [[TMP5]], i8** [[TMP4]], align 8
// CHECK3-NEXT: [[TMP6:%.*]] = bitcast [1 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK3-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, %class.anon.0*)* @__omp_outlined__2 to i8*), i8* null, i8** [[TMP6]], i64 1)
// CHECK3-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 2, i1 true)
// CHECK3-NEXT: ret void
// CHECK3: worker.exit:
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@__omp_outlined__2
// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %class.anon.0* nonnull align 8 dereferenceable(8) [[T:%.*]]) #[[ATTR0]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[T_ADDR:%.*]] = alloca %class.anon.0*, align 8
// CHECK3-NEXT: [[TMP:%.*]] = alloca %class.anon.0*, align 8
// CHECK3-NEXT: [[T1:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8
// CHECK3-NEXT: [[_TMP2:%.*]] = alloca %class.anon.0*, align 8
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK3-NEXT: store %class.anon.0* [[T]], %class.anon.0** [[T_ADDR]], align 8
// CHECK3-NEXT: [[TMP0:%.*]] = load %class.anon.0*, %class.anon.0** [[T_ADDR]], align 8
// CHECK3-NEXT: store %class.anon.0* [[TMP0]], %class.anon.0** [[TMP]], align 8
// CHECK3-NEXT: [[TMP1:%.*]] = load %class.anon.0*, %class.anon.0** [[TMP]], align 8
// CHECK3-NEXT: [[TMP2:%.*]] = bitcast %class.anon.0* [[T1]] to i8*
// CHECK3-NEXT: [[TMP3:%.*]] = bitcast %class.anon.0* [[TMP1]] to i8*
// CHECK3-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP2]], i8* align 8 [[TMP3]], i64 8, i1 false)
// CHECK3-NEXT: store %class.anon.0* [[T1]], %class.anon.0** [[_TMP2]], align 8
// CHECK3-NEXT: [[TMP4:%.*]] = load %class.anon.0*, %class.anon.0** [[_TMP2]], align 8
// CHECK3-NEXT: [[TMP5:%.*]] = load %class.anon.0*, %class.anon.0** [[_TMP2]], align 8
// CHECK3-NEXT: [[CALL:%.*]] = call i32 @_ZZN1S3fooEvENKUlvE_clEv(%class.anon.0* nonnull align 8 dereferenceable(8) [[TMP5]]) #[[ATTR6]]
// CHECK3-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l41
// CHECK4-SAME: (i64 [[ARGC:%.*]], i32* nonnull align 4 dereferenceable(4) [[B:%.*]], i32* nonnull align 4 dereferenceable(4) [[C:%.*]], i32* [[D:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], %class.anon* nonnull align 8 dereferenceable(40) [[L:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[ARGC_ADDR:%.*]] = alloca i64, align 8
// CHECK4-NEXT: [[B_ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[C_ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[D_ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[L_ADDR:%.*]] = alloca %class.anon*, align 8
// CHECK4-NEXT: [[TMP:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[_TMP1:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[_TMP2:%.*]] = alloca %class.anon*, align 8
// CHECK4-NEXT: [[L3:%.*]] = alloca [[CLASS_ANON:%.*]], align 8
// CHECK4-NEXT: [[_TMP4:%.*]] = alloca %class.anon*, align 8
// CHECK4-NEXT: [[B5:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[_TMP6:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[C7:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[_TMP8:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: store i64 [[ARGC]], i64* [[ARGC_ADDR]], align 8
// CHECK4-NEXT: store i32* [[B]], i32** [[B_ADDR]], align 8
// CHECK4-NEXT: store i32* [[C]], i32** [[C_ADDR]], align 8
// CHECK4-NEXT: store i32* [[D]], i32** [[D_ADDR]], align 8
// CHECK4-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK4-NEXT: store %class.anon* [[L]], %class.anon** [[L_ADDR]], align 8
// CHECK4-NEXT: [[CONV:%.*]] = bitcast i64* [[ARGC_ADDR]] to i32*
// CHECK4-NEXT: [[TMP0:%.*]] = load i32*, i32** [[B_ADDR]], align 8
// CHECK4-NEXT: [[TMP1:%.*]] = load i32*, i32** [[C_ADDR]], align 8
// CHECK4-NEXT: [[TMP2:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK4-NEXT: [[TMP3:%.*]] = load %class.anon*, %class.anon** [[L_ADDR]], align 8
// CHECK4-NEXT: store i32* [[TMP0]], i32** [[TMP]], align 8
// CHECK4-NEXT: store i32* [[TMP1]], i32** [[_TMP1]], align 8
// CHECK4-NEXT: store %class.anon* [[TMP3]], %class.anon** [[_TMP2]], align 8
// CHECK4-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1:[0-9]+]], i8 1, i1 true, i1 true)
// CHECK4-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP4]], -1
// CHECK4-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK4: user_code.entry:
// CHECK4-NEXT: [[TMP5:%.*]] = load %class.anon*, %class.anon** [[_TMP2]], align 8
// CHECK4-NEXT: [[TMP6:%.*]] = bitcast %class.anon* [[L3]] to i8*
// CHECK4-NEXT: [[TMP7:%.*]] = bitcast %class.anon* [[TMP5]] to i8*
// CHECK4-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP6]], i8* align 8 [[TMP7]], i64 40, i1 false)
// CHECK4-NEXT: store %class.anon* [[L3]], %class.anon** [[_TMP4]], align 8
// CHECK4-NEXT: [[TMP8:%.*]] = load i32*, i32** [[TMP]], align 8
// CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK4-NEXT: store i32 [[TMP9]], i32* [[B5]], align 4
// CHECK4-NEXT: store i32* [[B5]], i32** [[_TMP6]], align 8
// CHECK4-NEXT: [[TMP10:%.*]] = load i32*, i32** [[_TMP1]], align 8
// CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
// CHECK4-NEXT: store i32 [[TMP11]], i32* [[C7]], align 4
// CHECK4-NEXT: store i32* [[C7]], i32** [[_TMP8]], align 8
// CHECK4-NEXT: [[TMP12:%.*]] = load %class.anon*, %class.anon** [[_TMP4]], align 8
// CHECK4-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP12]], i32 0, i32 0
// CHECK4-NEXT: store i32* [[CONV]], i32** [[TMP13]], align 8
// CHECK4-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP12]], i32 0, i32 1
// CHECK4-NEXT: [[TMP15:%.*]] = load i32*, i32** [[_TMP6]], align 8
// CHECK4-NEXT: store i32* [[TMP15]], i32** [[TMP14]], align 8
// CHECK4-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP12]], i32 0, i32 2
// CHECK4-NEXT: [[TMP17:%.*]] = load i32*, i32** [[_TMP8]], align 8
// CHECK4-NEXT: store i32* [[TMP17]], i32** [[TMP16]], align 8
// CHECK4-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP12]], i32 0, i32 3
// CHECK4-NEXT: store i32** [[D_ADDR]], i32*** [[TMP18]], align 8
// CHECK4-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP12]], i32 0, i32 4
// CHECK4-NEXT: store i32* [[TMP2]], i32** [[TMP19]], align 8
// CHECK4-NEXT: [[TMP20:%.*]] = load %class.anon*, %class.anon** [[_TMP4]], align 8
// CHECK4-NEXT: [[CALL:%.*]] = call i64 @"_ZZ4mainENK3$_0clEv"(%class.anon* nonnull align 8 dereferenceable(40) [[TMP20]]) #[[ATTR6:[0-9]+]]
// CHECK4-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
// CHECK4-NEXT: ret void
// CHECK4: worker.exit:
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l43
// CHECK4-SAME: (i32* nonnull align 4 dereferenceable(4) [[ARGC:%.*]], i32* nonnull align 4 dereferenceable(4) [[B:%.*]], i32* nonnull align 4 dereferenceable(4) [[C:%.*]], i32* [[D:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], %class.anon* nonnull align 8 dereferenceable(40) [[L:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[ARGC_ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[B_ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[C_ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[D_ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[L_ADDR:%.*]] = alloca %class.anon*, align 8
// CHECK4-NEXT: [[TMP:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[_TMP1:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[_TMP2:%.*]] = alloca %class.anon*, align 8
// CHECK4-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [6 x i8*], align 8
// CHECK4-NEXT: store i32* [[ARGC]], i32** [[ARGC_ADDR]], align 8
// CHECK4-NEXT: store i32* [[B]], i32** [[B_ADDR]], align 8
// CHECK4-NEXT: store i32* [[C]], i32** [[C_ADDR]], align 8
// CHECK4-NEXT: store i32* [[D]], i32** [[D_ADDR]], align 8
// CHECK4-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK4-NEXT: store %class.anon* [[L]], %class.anon** [[L_ADDR]], align 8
// CHECK4-NEXT: [[TMP0:%.*]] = load i32*, i32** [[ARGC_ADDR]], align 8
// CHECK4-NEXT: [[TMP1:%.*]] = load i32*, i32** [[B_ADDR]], align 8
// CHECK4-NEXT: [[TMP2:%.*]] = load i32*, i32** [[C_ADDR]], align 8
// CHECK4-NEXT: [[TMP3:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK4-NEXT: [[TMP4:%.*]] = load %class.anon*, %class.anon** [[L_ADDR]], align 8
// CHECK4-NEXT: store i32* [[TMP1]], i32** [[TMP]], align 8
// CHECK4-NEXT: store i32* [[TMP2]], i32** [[_TMP1]], align 8
// CHECK4-NEXT: store %class.anon* [[TMP4]], %class.anon** [[_TMP2]], align 8
// CHECK4-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 2, i1 false, i1 true)
// CHECK4-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP5]], -1
// CHECK4-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK4: user_code.entry:
// CHECK4-NEXT: [[TMP6:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
// CHECK4-NEXT: [[TMP7:%.*]] = load i32*, i32** [[TMP]], align 8
// CHECK4-NEXT: [[TMP8:%.*]] = load i32*, i32** [[_TMP1]], align 8
// CHECK4-NEXT: [[TMP9:%.*]] = load i32*, i32** [[D_ADDR]], align 8
// CHECK4-NEXT: [[TMP10:%.*]] = load %class.anon*, %class.anon** [[_TMP2]], align 8
// CHECK4-NEXT: [[TMP11:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
// CHECK4-NEXT: [[TMP12:%.*]] = bitcast i32* [[TMP0]] to i8*
// CHECK4-NEXT: store i8* [[TMP12]], i8** [[TMP11]], align 8
// CHECK4-NEXT: [[TMP13:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
// CHECK4-NEXT: [[TMP14:%.*]] = bitcast i32* [[TMP7]] to i8*
// CHECK4-NEXT: store i8* [[TMP14]], i8** [[TMP13]], align 8
// CHECK4-NEXT: [[TMP15:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
// CHECK4-NEXT: [[TMP16:%.*]] = bitcast i32* [[TMP8]] to i8*
// CHECK4-NEXT: store i8* [[TMP16]], i8** [[TMP15]], align 8
// CHECK4-NEXT: [[TMP17:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 3
// CHECK4-NEXT: [[TMP18:%.*]] = bitcast i32* [[TMP9]] to i8*
// CHECK4-NEXT: store i8* [[TMP18]], i8** [[TMP17]], align 8
// CHECK4-NEXT: [[TMP19:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 4
// CHECK4-NEXT: [[TMP20:%.*]] = bitcast i32* [[TMP3]] to i8*
// CHECK4-NEXT: store i8* [[TMP20]], i8** [[TMP19]], align 8
// CHECK4-NEXT: [[TMP21:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 5
// CHECK4-NEXT: [[TMP22:%.*]] = bitcast %class.anon* [[TMP10]] to i8*
// CHECK4-NEXT: store i8* [[TMP22]], i8** [[TMP21]], align 8
// CHECK4-NEXT: [[TMP23:%.*]] = bitcast [6 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK4-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32*, i32*, i32*, i32*, i32*, %class.anon*)* @__omp_outlined__ to i8*), i8* null, i8** [[TMP23]], i64 6)
// CHECK4-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 2, i1 true)
// CHECK4-NEXT: ret void
// CHECK4: worker.exit:
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@__omp_outlined__
// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[ARGC:%.*]], i32* nonnull align 4 dereferenceable(4) [[B:%.*]], i32* nonnull align 4 dereferenceable(4) [[C:%.*]], i32* [[D:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], %class.anon* nonnull align 8 dereferenceable(40) [[L:%.*]]) #[[ATTR0]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[ARGC_ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[B_ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[C_ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[D_ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[L_ADDR:%.*]] = alloca %class.anon*, align 8
// CHECK4-NEXT: [[TMP:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[_TMP1:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[_TMP2:%.*]] = alloca %class.anon*, align 8
// CHECK4-NEXT: [[L3:%.*]] = alloca [[CLASS_ANON:%.*]], align 8
// CHECK4-NEXT: [[_TMP4:%.*]] = alloca %class.anon*, align 8
// CHECK4-NEXT: [[ARGC5:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[B6:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[_TMP7:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[C8:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[_TMP9:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[A10:%.*]] = alloca i32, align 4
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK4-NEXT: store i32* [[ARGC]], i32** [[ARGC_ADDR]], align 8
// CHECK4-NEXT: store i32* [[B]], i32** [[B_ADDR]], align 8
// CHECK4-NEXT: store i32* [[C]], i32** [[C_ADDR]], align 8
// CHECK4-NEXT: store i32* [[D]], i32** [[D_ADDR]], align 8
// CHECK4-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK4-NEXT: store %class.anon* [[L]], %class.anon** [[L_ADDR]], align 8
// CHECK4-NEXT: [[TMP0:%.*]] = load i32*, i32** [[ARGC_ADDR]], align 8
// CHECK4-NEXT: [[TMP1:%.*]] = load i32*, i32** [[B_ADDR]], align 8
// CHECK4-NEXT: [[TMP2:%.*]] = load i32*, i32** [[C_ADDR]], align 8
// CHECK4-NEXT: [[TMP3:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK4-NEXT: [[TMP4:%.*]] = load %class.anon*, %class.anon** [[L_ADDR]], align 8
// CHECK4-NEXT: store i32* [[TMP1]], i32** [[TMP]], align 8
// CHECK4-NEXT: store i32* [[TMP2]], i32** [[_TMP1]], align 8
// CHECK4-NEXT: store %class.anon* [[TMP4]], %class.anon** [[_TMP2]], align 8
// CHECK4-NEXT: [[TMP5:%.*]] = load %class.anon*, %class.anon** [[_TMP2]], align 8
// CHECK4-NEXT: [[TMP6:%.*]] = bitcast %class.anon* [[L3]] to i8*
// CHECK4-NEXT: [[TMP7:%.*]] = bitcast %class.anon* [[TMP5]] to i8*
// CHECK4-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP6]], i8* align 8 [[TMP7]], i64 40, i1 false)
// CHECK4-NEXT: store %class.anon* [[L3]], %class.anon** [[_TMP4]], align 8
// CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK4-NEXT: store i32 [[TMP8]], i32* [[ARGC5]], align 4
// CHECK4-NEXT: [[TMP9:%.*]] = load i32*, i32** [[TMP]], align 8
// CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
// CHECK4-NEXT: store i32 [[TMP10]], i32* [[B6]], align 4
// CHECK4-NEXT: store i32* [[B6]], i32** [[_TMP7]], align 8
// CHECK4-NEXT: [[TMP11:%.*]] = load i32*, i32** [[_TMP1]], align 8
// CHECK4-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
// CHECK4-NEXT: store i32 [[TMP12]], i32* [[C8]], align 4
// CHECK4-NEXT: store i32* [[C8]], i32** [[_TMP9]], align 8
// CHECK4-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK4-NEXT: store i32 [[TMP13]], i32* [[A10]], align 4
// CHECK4-NEXT: [[TMP14:%.*]] = load %class.anon*, %class.anon** [[_TMP4]], align 8
// CHECK4-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP14]], i32 0, i32 0
// CHECK4-NEXT: store i32* [[ARGC5]], i32** [[TMP15]], align 8
// CHECK4-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP14]], i32 0, i32 1
// CHECK4-NEXT: [[TMP17:%.*]] = load i32*, i32** [[_TMP7]], align 8
// CHECK4-NEXT: store i32* [[TMP17]], i32** [[TMP16]], align 8
// CHECK4-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP14]], i32 0, i32 2
// CHECK4-NEXT: [[TMP19:%.*]] = load i32*, i32** [[_TMP9]], align 8
// CHECK4-NEXT: store i32* [[TMP19]], i32** [[TMP18]], align 8
// CHECK4-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP14]], i32 0, i32 3
// CHECK4-NEXT: store i32** [[D_ADDR]], i32*** [[TMP20]], align 8
// CHECK4-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[TMP14]], i32 0, i32 4
// CHECK4-NEXT: store i32* [[A10]], i32** [[TMP21]], align 8
// CHECK4-NEXT: [[TMP22:%.*]] = load %class.anon*, %class.anon** [[_TMP4]], align 8
// CHECK4-NEXT: [[CALL:%.*]] = call i64 @"_ZZ4mainENK3$_0clEv"(%class.anon* nonnull align 8 dereferenceable(40) [[TMP22]]) #[[ATTR6]]
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN1S3fooEv_l27
// CHECK4-SAME: (%struct.S* [[THIS:%.*]], %class.anon.0* nonnull align 8 dereferenceable(8) [[L:%.*]]) #[[ATTR0]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK4-NEXT: [[L_ADDR:%.*]] = alloca %class.anon.0*, align 8
// CHECK4-NEXT: [[TMP:%.*]] = alloca %class.anon.0*, align 8
// CHECK4-NEXT: [[L1:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8
// CHECK4-NEXT: [[_TMP2:%.*]] = alloca %class.anon.0*, align 8
// CHECK4-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK4-NEXT: store %class.anon.0* [[L]], %class.anon.0** [[L_ADDR]], align 8
// CHECK4-NEXT: [[TMP0:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK4-NEXT: [[TMP1:%.*]] = load %class.anon.0*, %class.anon.0** [[L_ADDR]], align 8
// CHECK4-NEXT: store %class.anon.0* [[TMP1]], %class.anon.0** [[TMP]], align 8
// CHECK4-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 true, i1 true)
// CHECK4-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1
// CHECK4-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK4: user_code.entry:
// CHECK4-NEXT: [[TMP3:%.*]] = load %class.anon.0*, %class.anon.0** [[TMP]], align 8
// CHECK4-NEXT: [[TMP4:%.*]] = bitcast %class.anon.0* [[L1]] to i8*
// CHECK4-NEXT: [[TMP5:%.*]] = bitcast %class.anon.0* [[TMP3]] to i8*
// CHECK4-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP4]], i8* align 8 [[TMP5]], i64 8, i1 false)
// CHECK4-NEXT: store %class.anon.0* [[L1]], %class.anon.0** [[_TMP2]], align 8
// CHECK4-NEXT: [[TMP6:%.*]] = load %class.anon.0*, %class.anon.0** [[_TMP2]], align 8
// CHECK4-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[TMP6]], i32 0, i32 0
// CHECK4-NEXT: store %struct.S* [[TMP0]], %struct.S** [[TMP7]], align 8
// CHECK4-NEXT: [[TMP8:%.*]] = load %class.anon.0*, %class.anon.0** [[_TMP2]], align 8
// CHECK4-NEXT: [[CALL:%.*]] = call i32 @_ZZN1S3fooEvENKUlvE_clEv(%class.anon.0* nonnull align 8 dereferenceable(8) [[TMP8]]) #[[ATTR6]]
// CHECK4-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
// CHECK4-NEXT: ret void
// CHECK4: worker.exit:
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@_ZZN1S3fooEvENKUlvE_clEv
// CHECK4-SAME: (%class.anon.0* nonnull align 8 dereferenceable(8) [[THIS:%.*]]) #[[ATTR2:[0-9]+]] comdat align 2 {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %class.anon.0*, align 8
// CHECK4-NEXT: store %class.anon.0* [[THIS]], %class.anon.0** [[THIS_ADDR]], align 8
// CHECK4-NEXT: [[THIS1:%.*]] = load %class.anon.0*, %class.anon.0** [[THIS_ADDR]], align 8
// CHECK4-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON_0:%.*]], %class.anon.0* [[THIS1]], i32 0, i32 0
// CHECK4-NEXT: [[TMP1:%.*]] = load %struct.S*, %struct.S** [[TMP0]], align 8
// CHECK4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[TMP1]], i32 0, i32 0
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT: ret i32 [[TMP2]]
//
//
// CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN1S3fooEv_l29
// CHECK4-SAME: (%struct.S* [[THIS:%.*]], %class.anon.0* nonnull align 8 dereferenceable(8) [[L:%.*]]) #[[ATTR3]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK4-NEXT: [[L_ADDR:%.*]] = alloca %class.anon.0*, align 8
// CHECK4-NEXT: [[TMP:%.*]] = alloca %class.anon.0*, align 8
// CHECK4-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [2 x i8*], align 8
// CHECK4-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK4-NEXT: store %class.anon.0* [[L]], %class.anon.0** [[L_ADDR]], align 8
// CHECK4-NEXT: [[TMP0:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK4-NEXT: [[TMP1:%.*]] = load %class.anon.0*, %class.anon.0** [[L_ADDR]], align 8
// CHECK4-NEXT: store %class.anon.0* [[TMP1]], %class.anon.0** [[TMP]], align 8
// CHECK4-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 2, i1 false, i1 true)
// CHECK4-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1
// CHECK4-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK4: user_code.entry:
// CHECK4-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
// CHECK4-NEXT: [[TMP4:%.*]] = load %class.anon.0*, %class.anon.0** [[TMP]], align 8
// CHECK4-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
// CHECK4-NEXT: [[TMP6:%.*]] = bitcast %struct.S* [[TMP0]] to i8*
// CHECK4-NEXT: store i8* [[TMP6]], i8** [[TMP5]], align 8
// CHECK4-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
// CHECK4-NEXT: [[TMP8:%.*]] = bitcast %class.anon.0* [[TMP4]] to i8*
// CHECK4-NEXT: store i8* [[TMP8]], i8** [[TMP7]], align 8
// CHECK4-NEXT: [[TMP9:%.*]] = bitcast [2 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK4-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, %struct.S*, %class.anon.0*)* @__omp_outlined__1 to i8*), i8* null, i8** [[TMP9]], i64 2)
// CHECK4-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 2, i1 true)
// CHECK4-NEXT: ret void
// CHECK4: worker.exit:
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@__omp_outlined__1
// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S* [[THIS:%.*]], %class.anon.0* nonnull align 8 dereferenceable(8) [[L:%.*]]) #[[ATTR0]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK4-NEXT: [[L_ADDR:%.*]] = alloca %class.anon.0*, align 8
// CHECK4-NEXT: [[TMP:%.*]] = alloca %class.anon.0*, align 8
// CHECK4-NEXT: [[L1:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8
// CHECK4-NEXT: [[_TMP2:%.*]] = alloca %class.anon.0*, align 8
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK4-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK4-NEXT: store %class.anon.0* [[L]], %class.anon.0** [[L_ADDR]], align 8
// CHECK4-NEXT: [[TMP0:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK4-NEXT: [[TMP1:%.*]] = load %class.anon.0*, %class.anon.0** [[L_ADDR]], align 8
// CHECK4-NEXT: store %class.anon.0* [[TMP1]], %class.anon.0** [[TMP]], align 8
// CHECK4-NEXT: [[TMP2:%.*]] = load %class.anon.0*, %class.anon.0** [[TMP]], align 8
// CHECK4-NEXT: [[TMP3:%.*]] = bitcast %class.anon.0* [[L1]] to i8*
// CHECK4-NEXT: [[TMP4:%.*]] = bitcast %class.anon.0* [[TMP2]] to i8*
// CHECK4-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP3]], i8* align 8 [[TMP4]], i64 8, i1 false)
// CHECK4-NEXT: store %class.anon.0* [[L1]], %class.anon.0** [[_TMP2]], align 8
// CHECK4-NEXT: [[TMP5:%.*]] = load %class.anon.0*, %class.anon.0** [[_TMP2]], align 8
// CHECK4-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[TMP5]], i32 0, i32 0
// CHECK4-NEXT: store %struct.S* [[TMP0]], %struct.S** [[TMP6]], align 8
// CHECK4-NEXT: [[TMP7:%.*]] = load %class.anon.0*, %class.anon.0** [[_TMP2]], align 8
// CHECK4-NEXT: [[CALL:%.*]] = call i32 @_ZZN1S3fooEvENKUlvE_clEv(%class.anon.0* nonnull align 8 dereferenceable(8) [[TMP7]]) #[[ATTR6]]
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooIZN1S3fooEvEUlvE_EiRKT__l18
// CHECK4-SAME: (%class.anon.0* nonnull align 8 dereferenceable(8) [[T:%.*]]) #[[ATTR3]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[T_ADDR:%.*]] = alloca %class.anon.0*, align 8
// CHECK4-NEXT: [[TMP:%.*]] = alloca %class.anon.0*, align 8
// CHECK4-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x i8*], align 8
// CHECK4-NEXT: store %class.anon.0* [[T]], %class.anon.0** [[T_ADDR]], align 8
// CHECK4-NEXT: [[TMP0:%.*]] = load %class.anon.0*, %class.anon.0** [[T_ADDR]], align 8
// CHECK4-NEXT: store %class.anon.0* [[TMP0]], %class.anon.0** [[TMP]], align 8
// CHECK4-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 2, i1 false, i1 true)
// CHECK4-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
// CHECK4-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK4: user_code.entry:
// CHECK4-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
// CHECK4-NEXT: [[TMP3:%.*]] = load %class.anon.0*, %class.anon.0** [[TMP]], align 8
// CHECK4-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
// CHECK4-NEXT: [[TMP5:%.*]] = bitcast %class.anon.0* [[TMP3]] to i8*
// CHECK4-NEXT: store i8* [[TMP5]], i8** [[TMP4]], align 8
// CHECK4-NEXT: [[TMP6:%.*]] = bitcast [1 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK4-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, %class.anon.0*)* @__omp_outlined__2 to i8*), i8* null, i8** [[TMP6]], i64 1)
// CHECK4-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 2, i1 true)
// CHECK4-NEXT: ret void
// CHECK4: worker.exit:
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@__omp_outlined__2
// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %class.anon.0* nonnull align 8 dereferenceable(8) [[T:%.*]]) #[[ATTR0]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[T_ADDR:%.*]] = alloca %class.anon.0*, align 8
// CHECK4-NEXT: [[TMP:%.*]] = alloca %class.anon.0*, align 8
// CHECK4-NEXT: [[T1:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8
// CHECK4-NEXT: [[_TMP2:%.*]] = alloca %class.anon.0*, align 8
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK4-NEXT: store %class.anon.0* [[T]], %class.anon.0** [[T_ADDR]], align 8
// CHECK4-NEXT: [[TMP0:%.*]] = load %class.anon.0*, %class.anon.0** [[T_ADDR]], align 8
// CHECK4-NEXT: store %class.anon.0* [[TMP0]], %class.anon.0** [[TMP]], align 8
// CHECK4-NEXT: [[TMP1:%.*]] = load %class.anon.0*, %class.anon.0** [[TMP]], align 8
// CHECK4-NEXT: [[TMP2:%.*]] = bitcast %class.anon.0* [[T1]] to i8*
// CHECK4-NEXT: [[TMP3:%.*]] = bitcast %class.anon.0* [[TMP1]] to i8*
// CHECK4-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP2]], i8* align 8 [[TMP3]], i64 8, i1 false)
// CHECK4-NEXT: store %class.anon.0* [[T1]], %class.anon.0** [[_TMP2]], align 8
// CHECK4-NEXT: [[TMP4:%.*]] = load %class.anon.0*, %class.anon.0** [[_TMP2]], align 8
// CHECK4-NEXT: [[TMP5:%.*]] = load %class.anon.0*, %class.anon.0** [[_TMP2]], align 8
// CHECK4-NEXT: [[CALL:%.*]] = call i32 @_ZZN1S3fooEvENKUlvE_clEv(%class.anon.0* nonnull align 8 dereferenceable(8) [[TMP5]]) #[[ATTR6]]
// CHECK4-NEXT: ret void
//