1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _ 2 // Test host codegen. 3 // RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1 4 // RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s 5 // RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK2 6 // RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK3 7 // RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s 8 // RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK4 9 10 // RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK5 11 // RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s 12 // RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s 
-emit-llvm -o - | FileCheck %s --check-prefix=CHECK6 13 // RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK7 14 // RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s 15 // RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK8 16 17 // Test target codegen - host bc file has to be created first. 18 // RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc 19 // RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix=CHECK9 20 // RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s 21 // RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK10 22 // RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc 23 // RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-device 
-fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix=CHECK11 24 // RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s 25 // RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK12 26 27 // RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc 28 // RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix=CHECK13 29 // RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s 30 // RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK14 31 // RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc 32 // RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix=CHECK15 33 // RUN: 
%clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s 34 // RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK16 35 36 // Test host codegen. 37 // RUN: %clang_cc1 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK17 38 // RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s 39 // RUN: %clang_cc1 -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK18 40 // RUN: %clang_cc1 -verify -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK19 41 // RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s 42 // RUN: %clang_cc1 -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK20 43 44 // RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK21 45 // RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s 46 // RUN: %clang_cc1 -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown 
-fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK22 47 // RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK23 48 // RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s 49 // RUN: %clang_cc1 -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK24 50 51 // Test target codegen - host bc file has to be created first. 52 // RUN: %clang_cc1 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc 53 // RUN: %clang_cc1 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix=CHECK25 54 // RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s 55 // RUN: %clang_cc1 -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK26 56 // RUN: %clang_cc1 -verify -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc 57 // RUN: %clang_cc1 -verify -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix=CHECK27 58 // RUN: %clang_cc1 -fopenmp -x 
c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s 59 // RUN: %clang_cc1 -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK28 60 61 // RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc 62 // RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix=CHECK29 63 // RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s 64 // RUN: %clang_cc1 -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK30 65 // RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc 66 // RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix=CHECK31 67 // RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s 68 // RUN: %clang_cc1 -fopenmp-simd -x c++ -triple i386-unknown-unknown 
-fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK32 69 70 // expected-no-diagnostics 71 #ifndef HEADER 72 #define HEADER 73 74 75 76 77 // We have 6 target regions 78 79 80 81 // Check target registration is registered as a Ctor. 82 83 84 /* ftemplate: instantiated as ftemplate<int> from bar(); holds two target-teams regions — one whose thread_limit is the type-dependent expression tx(20), and one pairing num_teams(b) with the constant thread_limit(1024). NOTE(review): the CHECK assertions reference these regions by source line (e.g. _l88), so do not add or remove lines here. */ template<typename tx> 85 tx ftemplate(int n) { 86 tx a = 0; 87 88 #pragma omp target teams thread_limit(tx(20)) 89 { 90 } 91 92 short b = 1; 93 #pragma omp target teams num_teams(b) thread_limit(1024) 94 { 95 a += b; 96 } 97 98 return a; 99 } 100 101 /* fstatic: internal-linkage function; both regions capture the runtime argument n in their num_teams/thread_limit clause expressions (n*32, 32+n). */ static 102 int fstatic(int n) { 103 104 #pragma omp target teams num_teams(n) thread_limit(n*32) 105 { 106 } 107 108 #pragma omp target teams thread_limit(32+n) 109 { 110 } 111 112 return n+1; 113 } 114 115 /* S1: exercises target-teams regions inside a member function — thread_limit captures locals (n-b) and a constant (1024); both regions store to this->a, so 'this' is part of the captured/mapped state. */ struct S1 { 116 double a; 117 118 int r1(int n){ 119 int b = 1; 120 121 #pragma omp target teams thread_limit(n-b) 122 { 123 this->a = (double)b + 1.5; 124 } 125 126 #pragma omp target teams thread_limit(1024) 127 { 128 this->a = 2.5; 129 } 130 131 return (int)a; 132 } 133 }; 134 135 /* bar: host driver asserted by the CHECK blocks; calls S1::r1, fstatic and ftemplate<int> so that all six target regions above are emitted and registered. */ int bar(int n){ 136 int a = 0; 137 138 S1 S; 139 a += S.r1(n); 140 141 a += fstatic(n); 142 143 a += ftemplate<int>(n); 144 145 return a; 146 } 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 // Check that the offloading functions are emitted and that the parallel function 169 // is appropriately guarded.
170 171 172 173 174 175 176 #endif 177 // CHECK1-LABEL: define {{[^@]+}}@_Z3bari 178 // CHECK1-SAME: (i32 signext [[N:%.*]]) #[[ATTR0:[0-9]+]] { 179 // CHECK1-NEXT: entry: 180 // CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 181 // CHECK1-NEXT: [[A:%.*]] = alloca i32, align 4 182 // CHECK1-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8 183 // CHECK1-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 184 // CHECK1-NEXT: store i32 0, i32* [[A]], align 4 185 // CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 186 // CHECK1-NEXT: [[CALL:%.*]] = call signext i32 @_ZN2S12r1Ei(%struct.S1* nonnull dereferenceable(8) [[S]], i32 signext [[TMP0]]) 187 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4 188 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] 189 // CHECK1-NEXT: store i32 [[ADD]], i32* [[A]], align 4 190 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 191 // CHECK1-NEXT: [[CALL1:%.*]] = call signext i32 @_ZL7fstatici(i32 signext [[TMP2]]) 192 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 193 // CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] 194 // CHECK1-NEXT: store i32 [[ADD2]], i32* [[A]], align 4 195 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 196 // CHECK1-NEXT: [[CALL3:%.*]] = call signext i32 @_Z9ftemplateIiET_i(i32 signext [[TMP4]]) 197 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4 198 // CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] 199 // CHECK1-NEXT: store i32 [[ADD4]], i32* [[A]], align 4 200 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[A]], align 4 201 // CHECK1-NEXT: ret i32 [[TMP6]] 202 // 203 // 204 // CHECK1-LABEL: define {{[^@]+}}@_ZN2S12r1Ei 205 // CHECK1-SAME: (%struct.S1* nonnull dereferenceable(8) [[THIS:%.*]], i32 signext [[N:%.*]]) #[[ATTR0]] comdat align 2 { 206 // CHECK1-NEXT: entry: 207 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 208 // CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32, 
align 4 209 // CHECK1-NEXT: [[B:%.*]] = alloca i32, align 4 210 // CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 211 // CHECK1-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8 212 // CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 213 // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 214 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 215 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 216 // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [1 x i8*], align 8 217 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [1 x i8*], align 8 218 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [1 x i8*], align 8 219 // CHECK1-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 220 // CHECK1-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 221 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 222 // CHECK1-NEXT: store i32 1, i32* [[B]], align 4 223 // CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 224 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[B]], align 4 225 // CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP0]], [[TMP1]] 226 // CHECK1-NEXT: store i32 [[SUB]], i32* [[DOTCAPTURE_EXPR_]], align 4 227 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[B]], align 4 228 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[B_CASTED]] to i32* 229 // CHECK1-NEXT: store i32 [[TMP2]], i32* [[CONV]], align 4 230 // CHECK1-NEXT: [[TMP3:%.*]] = load i64, i64* [[B_CASTED]], align 8 231 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 232 // CHECK1-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* 233 // CHECK1-NEXT: store i32 [[TMP4]], i32* [[CONV2]], align 4 234 // CHECK1-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 235 // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0 236 // CHECK1-NEXT: [[TMP6:%.*]] 
= getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 237 // CHECK1-NEXT: [[TMP7:%.*]] = bitcast i8** [[TMP6]] to %struct.S1** 238 // CHECK1-NEXT: store %struct.S1* [[THIS1]], %struct.S1** [[TMP7]], align 8 239 // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 240 // CHECK1-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to double** 241 // CHECK1-NEXT: store double* [[A]], double** [[TMP9]], align 8 242 // CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 243 // CHECK1-NEXT: store i8* null, i8** [[TMP10]], align 8 244 // CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 245 // CHECK1-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* 246 // CHECK1-NEXT: store i64 [[TMP3]], i64* [[TMP12]], align 8 247 // CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 248 // CHECK1-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* 249 // CHECK1-NEXT: store i64 [[TMP3]], i64* [[TMP14]], align 8 250 // CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 251 // CHECK1-NEXT: store i8* null, i8** [[TMP15]], align 8 252 // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 253 // CHECK1-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64* 254 // CHECK1-NEXT: store i64 [[TMP5]], i64* [[TMP17]], align 8 255 // CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 256 // CHECK1-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* 257 // CHECK1-NEXT: store i64 [[TMP5]], i64* [[TMP19]], align 8 258 // CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 259 // CHECK1-NEXT: store i8* null, i8** 
[[TMP20]], align 8 260 // CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 261 // CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 262 // CHECK1-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 263 // CHECK1-NEXT: [[TMP24:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l121.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 [[TMP23]]) 264 // CHECK1-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0 265 // CHECK1-NEXT: br i1 [[TMP25]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 266 // CHECK1: omp_offload.failed: 267 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l121(%struct.S1* [[THIS1]], i64 [[TMP3]], i64 [[TMP5]]) #[[ATTR2:[0-9]+]] 268 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] 269 // CHECK1: omp_offload.cont: 270 // CHECK1-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 271 // CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 272 // CHECK1-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to %struct.S1** 273 // CHECK1-NEXT: store %struct.S1* [[THIS1]], %struct.S1** [[TMP27]], align 8 274 // CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 275 // CHECK1-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to double** 276 // CHECK1-NEXT: store double* [[A3]], double** [[TMP29]], align 8 277 // CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i64 0, i64 0 278 // CHECK1-NEXT: 
store i8* null, i8** [[TMP30]], align 8 279 // CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 280 // CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 281 // CHECK1-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l126.region_id, i32 1, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1024) 282 // CHECK1-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 283 // CHECK1-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED7:%.*]], label [[OMP_OFFLOAD_CONT8:%.*]] 284 // CHECK1: omp_offload.failed7: 285 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l126(%struct.S1* [[THIS1]]) #[[ATTR2]] 286 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT8]] 287 // CHECK1: omp_offload.cont8: 288 // CHECK1-NEXT: [[A9:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 289 // CHECK1-NEXT: [[TMP35:%.*]] = load double, double* [[A9]], align 8 290 // CHECK1-NEXT: [[CONV10:%.*]] = fptosi double [[TMP35]] to i32 291 // CHECK1-NEXT: ret i32 [[CONV10]] 292 // 293 // 294 // CHECK1-LABEL: define {{[^@]+}}@_ZL7fstatici 295 // CHECK1-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] { 296 // CHECK1-NEXT: entry: 297 // CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 298 // CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 299 // CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 300 // CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 301 // CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED2:%.*]] = alloca i64, align 8 302 // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x i8*], align 8 303 // CHECK1-NEXT: 
[[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x i8*], align 8 304 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x i8*], align 8 305 // CHECK1-NEXT: [[DOTCAPTURE_EXPR_4:%.*]] = alloca i32, align 4 306 // CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED5:%.*]] = alloca i64, align 8 307 // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS7:%.*]] = alloca [1 x i8*], align 8 308 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS8:%.*]] = alloca [1 x i8*], align 8 309 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS9:%.*]] = alloca [1 x i8*], align 8 310 // CHECK1-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 311 // CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 312 // CHECK1-NEXT: store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4 313 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 314 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP1]], 32 315 // CHECK1-NEXT: store i32 [[MUL]], i32* [[DOTCAPTURE_EXPR_1]], align 4 316 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 317 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* 318 // CHECK1-NEXT: store i32 [[TMP2]], i32* [[CONV]], align 4 319 // CHECK1-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 320 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 321 // CHECK1-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED2]] to i32* 322 // CHECK1-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 323 // CHECK1-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED2]], align 8 324 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 325 // CHECK1-NEXT: [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i64* 326 // CHECK1-NEXT: store i64 [[TMP3]], i64* [[TMP7]], align 8 327 // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 328 // CHECK1-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* 329 // CHECK1-NEXT: store i64 
[[TMP3]], i64* [[TMP9]], align 8 330 // CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 331 // CHECK1-NEXT: store i8* null, i8** [[TMP10]], align 8 332 // CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 333 // CHECK1-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* 334 // CHECK1-NEXT: store i64 [[TMP5]], i64* [[TMP12]], align 8 335 // CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 336 // CHECK1-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* 337 // CHECK1-NEXT: store i64 [[TMP5]], i64* [[TMP14]], align 8 338 // CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 339 // CHECK1-NEXT: store i8* null, i8** [[TMP15]], align 8 340 // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 341 // CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 342 // CHECK1-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 343 // CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 344 // CHECK1-NEXT: [[TMP20:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l104.region_id, i32 2, i8** [[TMP16]], i8** [[TMP17]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 [[TMP18]], i32 [[TMP19]]) 345 // CHECK1-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0 346 // CHECK1-NEXT: br i1 [[TMP21]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 347 // CHECK1: omp_offload.failed: 348 // CHECK1-NEXT: call void 
@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l104(i64 [[TMP3]], i64 [[TMP5]]) #[[ATTR2]] 349 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] 350 // CHECK1: omp_offload.cont: 351 // CHECK1-NEXT: [[TMP22:%.*]] = load i32, i32* [[N_ADDR]], align 4 352 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 32, [[TMP22]] 353 // CHECK1-NEXT: store i32 [[ADD]], i32* [[DOTCAPTURE_EXPR_4]], align 4 354 // CHECK1-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4 355 // CHECK1-NEXT: [[CONV6:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED5]] to i32* 356 // CHECK1-NEXT: store i32 [[TMP23]], i32* [[CONV6]], align 4 357 // CHECK1-NEXT: [[TMP24:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED5]], align 8 358 // CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 359 // CHECK1-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i64* 360 // CHECK1-NEXT: store i64 [[TMP24]], i64* [[TMP26]], align 8 361 // CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 362 // CHECK1-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64* 363 // CHECK1-NEXT: store i64 [[TMP24]], i64* [[TMP28]], align 8 364 // CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 0 365 // CHECK1-NEXT: store i8* null, i8** [[TMP29]], align 8 366 // CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 367 // CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 368 // CHECK1-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4 369 // CHECK1-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l108.region_id, i32 1, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* 
@.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 [[TMP32]]) 370 // CHECK1-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 371 // CHECK1-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED10:%.*]], label [[OMP_OFFLOAD_CONT11:%.*]] 372 // CHECK1: omp_offload.failed10: 373 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l108(i64 [[TMP24]]) #[[ATTR2]] 374 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT11]] 375 // CHECK1: omp_offload.cont11: 376 // CHECK1-NEXT: [[TMP35:%.*]] = load i32, i32* [[N_ADDR]], align 4 377 // CHECK1-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP35]], 1 378 // CHECK1-NEXT: ret i32 [[ADD12]] 379 // 380 // 381 // CHECK1-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i 382 // CHECK1-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] comdat { 383 // CHECK1-NEXT: entry: 384 // CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 385 // CHECK1-NEXT: [[A:%.*]] = alloca i32, align 4 386 // CHECK1-NEXT: [[B:%.*]] = alloca i16, align 2 387 // CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i16, align 2 388 // CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 389 // CHECK1-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8 390 // CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 391 // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 392 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 393 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 394 // CHECK1-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 395 // CHECK1-NEXT: store i32 0, i32* [[A]], align 4 396 // CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l88.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 20) 397 // CHECK1-NEXT: [[TMP1:%.*]] = icmp ne i32 [[TMP0]], 0 
398 // CHECK1-NEXT: br i1 [[TMP1]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 399 // CHECK1: omp_offload.failed: 400 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l88() #[[ATTR2]] 401 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] 402 // CHECK1: omp_offload.cont: 403 // CHECK1-NEXT: store i16 1, i16* [[B]], align 2 404 // CHECK1-NEXT: [[TMP2:%.*]] = load i16, i16* [[B]], align 2 405 // CHECK1-NEXT: store i16 [[TMP2]], i16* [[DOTCAPTURE_EXPR_]], align 2 406 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 407 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32* 408 // CHECK1-NEXT: store i32 [[TMP3]], i32* [[CONV]], align 4 409 // CHECK1-NEXT: [[TMP4:%.*]] = load i64, i64* [[A_CASTED]], align 8 410 // CHECK1-NEXT: [[TMP5:%.*]] = load i16, i16* [[B]], align 2 411 // CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[B_CASTED]] to i16* 412 // CHECK1-NEXT: store i16 [[TMP5]], i16* [[CONV1]], align 2 413 // CHECK1-NEXT: [[TMP6:%.*]] = load i64, i64* [[B_CASTED]], align 8 414 // CHECK1-NEXT: [[TMP7:%.*]] = load i16, i16* [[DOTCAPTURE_EXPR_]], align 2 415 // CHECK1-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i16* 416 // CHECK1-NEXT: store i16 [[TMP7]], i16* [[CONV2]], align 2 417 // CHECK1-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 418 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 419 // CHECK1-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i64* 420 // CHECK1-NEXT: store i64 [[TMP4]], i64* [[TMP10]], align 8 421 // CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 422 // CHECK1-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* 423 // CHECK1-NEXT: store i64 [[TMP4]], i64* [[TMP12]], align 8 424 // CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 425 // 
CHECK1-NEXT: store i8* null, i8** [[TMP13]], align 8 426 // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 427 // CHECK1-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* 428 // CHECK1-NEXT: store i64 [[TMP6]], i64* [[TMP15]], align 8 429 // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 430 // CHECK1-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64* 431 // CHECK1-NEXT: store i64 [[TMP6]], i64* [[TMP17]], align 8 432 // CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 433 // CHECK1-NEXT: store i8* null, i8** [[TMP18]], align 8 434 // CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 435 // CHECK1-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64* 436 // CHECK1-NEXT: store i64 [[TMP8]], i64* [[TMP20]], align 8 437 // CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 438 // CHECK1-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i64* 439 // CHECK1-NEXT: store i64 [[TMP8]], i64* [[TMP22]], align 8 440 // CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 441 // CHECK1-NEXT: store i8* null, i8** [[TMP23]], align 8 442 // CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 443 // CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 444 // CHECK1-NEXT: [[TMP26:%.*]] = load i16, i16* [[DOTCAPTURE_EXPR_]], align 2 445 // CHECK1-NEXT: [[TMP27:%.*]] = sext i16 [[TMP26]] to i32 446 // CHECK1-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l93.region_id, i32 3, i8** [[TMP24]], i8** 
[[TMP25]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 [[TMP27]], i32 1024) 447 // CHECK1-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 448 // CHECK1-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] 449 // CHECK1: omp_offload.failed3: 450 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l93(i64 [[TMP4]], i64 [[TMP6]], i64 [[TMP8]]) #[[ATTR2]] 451 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT4]] 452 // CHECK1: omp_offload.cont4: 453 // CHECK1-NEXT: [[TMP30:%.*]] = load i32, i32* [[A]], align 4 454 // CHECK1-NEXT: ret i32 [[TMP30]] 455 // 456 // 457 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l121 458 // CHECK1-SAME: (%struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1:[0-9]+]] { 459 // CHECK1-NEXT: entry: 460 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 461 // CHECK1-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8 462 // CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 463 // CHECK1-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8 464 // CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 465 // CHECK1-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 466 // CHECK1-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8 467 // CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 468 // CHECK1-NEXT: [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 469 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32* 470 // CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32* 471 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV1]], align 8 472 // CHECK1-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* 
@[[GLOB1]], i32 [[TMP0]], i32 0, i32 [[TMP2]]) 473 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8 474 // CHECK1-NEXT: [[CONV2:%.*]] = bitcast i64* [[B_CASTED]] to i32* 475 // CHECK1-NEXT: store i32 [[TMP3]], i32* [[CONV2]], align 4 476 // CHECK1-NEXT: [[TMP4:%.*]] = load i64, i64* [[B_CASTED]], align 8 477 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]], i64 [[TMP4]]) 478 // CHECK1-NEXT: ret void 479 // 480 // 481 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined. 482 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i64 [[B:%.*]]) #[[ATTR1]] { 483 // CHECK1-NEXT: entry: 484 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 485 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 486 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 487 // CHECK1-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8 488 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 489 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 490 // CHECK1-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 491 // CHECK1-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8 492 // CHECK1-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 493 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32* 494 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8 495 // CHECK1-NEXT: [[CONV1:%.*]] = sitofp i32 [[TMP1]] to double 496 // CHECK1-NEXT: [[ADD:%.*]] = fadd double [[CONV1]], 1.500000e+00 497 // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0 498 // CHECK1-NEXT: store double [[ADD]], double* [[A]], 
align 8 499 // CHECK1-NEXT: ret void 500 // 501 // 502 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l126 503 // CHECK1-SAME: (%struct.S1* [[THIS:%.*]]) #[[ATTR1]] { 504 // CHECK1-NEXT: entry: 505 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 506 // CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 507 // CHECK1-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 508 // CHECK1-NEXT: [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 509 // CHECK1-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 1024) 510 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]]) 511 // CHECK1-NEXT: ret void 512 // 513 // 514 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1 515 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]]) #[[ATTR1]] { 516 // CHECK1-NEXT: entry: 517 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 518 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 519 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 520 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 521 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 522 // CHECK1-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 523 // CHECK1-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 524 // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0 525 // CHECK1-NEXT: store double 2.500000e+00, double* [[A]], align 8 526 // CHECK1-NEXT: ret void 527 // 528 // 
529 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l104 530 // CHECK1-SAME: (i64 [[DOTCAPTURE_EXPR_:%.*]], i64 [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR1]] { 531 // CHECK1-NEXT: entry: 532 // CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 533 // CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca i64, align 8 534 // CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 535 // CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 536 // CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_1]], i64* [[DOTCAPTURE_EXPR__ADDR2]], align 8 537 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32* 538 // CHECK1-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR2]] to i32* 539 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8 540 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV3]], align 8 541 // CHECK1-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]]) 542 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*)) 543 // CHECK1-NEXT: ret void 544 // 545 // 546 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..4 547 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 548 // CHECK1-NEXT: entry: 549 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 550 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 551 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 552 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 553 // CHECK1-NEXT: ret void 554 // 555 // 556 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l108 557 // CHECK1-SAME: (i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] { 558 // CHECK1-NEXT: entry: 559 // CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 560 // CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 561 // CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 562 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32* 563 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8 564 // CHECK1-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 [[TMP1]]) 565 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..7 to void (i32*, i32*, ...)*)) 566 // CHECK1-NEXT: ret void 567 // 568 // 569 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..7 570 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 571 // CHECK1-NEXT: entry: 572 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 573 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 574 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 575 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 576 // CHECK1-NEXT: ret void 577 // 578 // 579 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l88 580 // CHECK1-SAME: () #[[ATTR1]] { 581 // CHECK1-NEXT: entry: 582 // CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 583 // CHECK1-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 20) 584 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*)) 585 // CHECK1-NEXT: ret void 586 // 587 // 588 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..10 589 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 590 // CHECK1-NEXT: entry: 591 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 592 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 593 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 594 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 595 // CHECK1-NEXT: ret void 596 // 597 // 598 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l93 599 // CHECK1-SAME: (i64 [[A:%.*]], i64 [[B:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] { 600 // CHECK1-NEXT: entry: 601 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 602 // CHECK1-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8 603 // CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 604 // CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 605 // CHECK1-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8 606 // CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 607 // CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 608 // CHECK1-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8 609 // CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 610 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 611 // CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[B_ADDR]] to i16* 612 // CHECK1-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i16* 613 // CHECK1-NEXT: [[TMP1:%.*]] = load i16, i16* [[CONV2]], align 8 614 // CHECK1-NEXT: [[TMP2:%.*]] = sext i16 [[TMP1]] to i32 615 // CHECK1-NEXT: call void 
@__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 1024) 616 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8 617 // CHECK1-NEXT: [[CONV3:%.*]] = bitcast i64* [[A_CASTED]] to i32* 618 // CHECK1-NEXT: store i32 [[TMP3]], i32* [[CONV3]], align 4 619 // CHECK1-NEXT: [[TMP4:%.*]] = load i64, i64* [[A_CASTED]], align 8 620 // CHECK1-NEXT: [[TMP5:%.*]] = load i16, i16* [[CONV1]], align 8 621 // CHECK1-NEXT: [[CONV4:%.*]] = bitcast i64* [[B_CASTED]] to i16* 622 // CHECK1-NEXT: store i16 [[TMP5]], i16* [[CONV4]], align 2 623 // CHECK1-NEXT: [[TMP6:%.*]] = load i64, i64* [[B_CASTED]], align 8 624 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP4]], i64 [[TMP6]]) 625 // CHECK1-NEXT: ret void 626 // 627 // 628 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..11 629 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR1]] { 630 // CHECK1-NEXT: entry: 631 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 632 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 633 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 634 // CHECK1-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8 635 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 636 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 637 // CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 638 // CHECK1-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8 639 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 640 // CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[B_ADDR]] to i16* 641 // CHECK1-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV1]], align 8 642 // CHECK1-NEXT: [[CONV2:%.*]] = sext i16 [[TMP0]] to 
i32 643 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8 644 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CONV2]] 645 // CHECK1-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8 646 // CHECK1-NEXT: ret void 647 // 648 // 649 // CHECK1-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg 650 // CHECK1-SAME: () #[[ATTR3:[0-9]+]] { 651 // CHECK1-NEXT: entry: 652 // CHECK1-NEXT: call void @__tgt_register_requires(i64 1) 653 // CHECK1-NEXT: ret void 654 // 655 // 656 // CHECK2-LABEL: define {{[^@]+}}@_Z3bari 657 // CHECK2-SAME: (i32 signext [[N:%.*]]) #[[ATTR0:[0-9]+]] { 658 // CHECK2-NEXT: entry: 659 // CHECK2-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 660 // CHECK2-NEXT: [[A:%.*]] = alloca i32, align 4 661 // CHECK2-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8 662 // CHECK2-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 663 // CHECK2-NEXT: store i32 0, i32* [[A]], align 4 664 // CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 665 // CHECK2-NEXT: [[CALL:%.*]] = call signext i32 @_ZN2S12r1Ei(%struct.S1* nonnull dereferenceable(8) [[S]], i32 signext [[TMP0]]) 666 // CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4 667 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] 668 // CHECK2-NEXT: store i32 [[ADD]], i32* [[A]], align 4 669 // CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 670 // CHECK2-NEXT: [[CALL1:%.*]] = call signext i32 @_ZL7fstatici(i32 signext [[TMP2]]) 671 // CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 672 // CHECK2-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] 673 // CHECK2-NEXT: store i32 [[ADD2]], i32* [[A]], align 4 674 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 675 // CHECK2-NEXT: [[CALL3:%.*]] = call signext i32 @_Z9ftemplateIiET_i(i32 signext [[TMP4]]) 676 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4 677 // CHECK2-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] 678 // CHECK2-NEXT: store i32 [[ADD4]], i32* 
[[A]], align 4 679 // CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[A]], align 4 680 // CHECK2-NEXT: ret i32 [[TMP6]] 681 // 682 // 683 // CHECK2-LABEL: define {{[^@]+}}@_ZN2S12r1Ei 684 // CHECK2-SAME: (%struct.S1* nonnull dereferenceable(8) [[THIS:%.*]], i32 signext [[N:%.*]]) #[[ATTR0]] comdat align 2 { 685 // CHECK2-NEXT: entry: 686 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 687 // CHECK2-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 688 // CHECK2-NEXT: [[B:%.*]] = alloca i32, align 4 689 // CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 690 // CHECK2-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8 691 // CHECK2-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 692 // CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 693 // CHECK2-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 694 // CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 695 // CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [1 x i8*], align 8 696 // CHECK2-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [1 x i8*], align 8 697 // CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [1 x i8*], align 8 698 // CHECK2-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 699 // CHECK2-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 700 // CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 701 // CHECK2-NEXT: store i32 1, i32* [[B]], align 4 702 // CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 703 // CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[B]], align 4 704 // CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP0]], [[TMP1]] 705 // CHECK2-NEXT: store i32 [[SUB]], i32* [[DOTCAPTURE_EXPR_]], align 4 706 // CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[B]], align 4 707 // CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[B_CASTED]] to i32* 708 // CHECK2-NEXT: store i32 [[TMP2]], i32* [[CONV]], align 4 709 // CHECK2-NEXT: [[TMP3:%.*]] = load i64, i64* [[B_CASTED]], align 8 710 // 
CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 711 // CHECK2-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* 712 // CHECK2-NEXT: store i32 [[TMP4]], i32* [[CONV2]], align 4 713 // CHECK2-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 714 // CHECK2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0 715 // CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 716 // CHECK2-NEXT: [[TMP7:%.*]] = bitcast i8** [[TMP6]] to %struct.S1** 717 // CHECK2-NEXT: store %struct.S1* [[THIS1]], %struct.S1** [[TMP7]], align 8 718 // CHECK2-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 719 // CHECK2-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to double** 720 // CHECK2-NEXT: store double* [[A]], double** [[TMP9]], align 8 721 // CHECK2-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 722 // CHECK2-NEXT: store i8* null, i8** [[TMP10]], align 8 723 // CHECK2-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 724 // CHECK2-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* 725 // CHECK2-NEXT: store i64 [[TMP3]], i64* [[TMP12]], align 8 726 // CHECK2-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 727 // CHECK2-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* 728 // CHECK2-NEXT: store i64 [[TMP3]], i64* [[TMP14]], align 8 729 // CHECK2-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 730 // CHECK2-NEXT: store i8* null, i8** [[TMP15]], align 8 731 // CHECK2-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 732 // CHECK2-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64* 733 // CHECK2-NEXT: 
store i64 [[TMP5]], i64* [[TMP17]], align 8 734 // CHECK2-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 735 // CHECK2-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* 736 // CHECK2-NEXT: store i64 [[TMP5]], i64* [[TMP19]], align 8 737 // CHECK2-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 738 // CHECK2-NEXT: store i8* null, i8** [[TMP20]], align 8 739 // CHECK2-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 740 // CHECK2-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 741 // CHECK2-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 742 // CHECK2-NEXT: [[TMP24:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l121.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 [[TMP23]]) 743 // CHECK2-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0 744 // CHECK2-NEXT: br i1 [[TMP25]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 745 // CHECK2: omp_offload.failed: 746 // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l121(%struct.S1* [[THIS1]], i64 [[TMP3]], i64 [[TMP5]]) #[[ATTR2:[0-9]+]] 747 // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT]] 748 // CHECK2: omp_offload.cont: 749 // CHECK2-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 750 // CHECK2-NEXT: [[TMP26:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 751 // CHECK2-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to %struct.S1** 752 // CHECK2-NEXT: store 
%struct.S1* [[THIS1]], %struct.S1** [[TMP27]], align 8 753 // CHECK2-NEXT: [[TMP28:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 754 // CHECK2-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to double** 755 // CHECK2-NEXT: store double* [[A3]], double** [[TMP29]], align 8 756 // CHECK2-NEXT: [[TMP30:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i64 0, i64 0 757 // CHECK2-NEXT: store i8* null, i8** [[TMP30]], align 8 758 // CHECK2-NEXT: [[TMP31:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 759 // CHECK2-NEXT: [[TMP32:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 760 // CHECK2-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l126.region_id, i32 1, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1024) 761 // CHECK2-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 762 // CHECK2-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED7:%.*]], label [[OMP_OFFLOAD_CONT8:%.*]] 763 // CHECK2: omp_offload.failed7: 764 // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l126(%struct.S1* [[THIS1]]) #[[ATTR2]] 765 // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT8]] 766 // CHECK2: omp_offload.cont8: 767 // CHECK2-NEXT: [[A9:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 768 // CHECK2-NEXT: [[TMP35:%.*]] = load double, double* [[A9]], align 8 769 // CHECK2-NEXT: [[CONV10:%.*]] = fptosi double [[TMP35]] to i32 770 // CHECK2-NEXT: ret i32 [[CONV10]] 771 // 772 // 773 // CHECK2-LABEL: define {{[^@]+}}@_ZL7fstatici 774 // CHECK2-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] { 775 // CHECK2-NEXT: 
entry: 776 // CHECK2-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 777 // CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 778 // CHECK2-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 779 // CHECK2-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 780 // CHECK2-NEXT: [[DOTCAPTURE_EXPR__CASTED2:%.*]] = alloca i64, align 8 781 // CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x i8*], align 8 782 // CHECK2-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x i8*], align 8 783 // CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x i8*], align 8 784 // CHECK2-NEXT: [[DOTCAPTURE_EXPR_4:%.*]] = alloca i32, align 4 785 // CHECK2-NEXT: [[DOTCAPTURE_EXPR__CASTED5:%.*]] = alloca i64, align 8 786 // CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS7:%.*]] = alloca [1 x i8*], align 8 787 // CHECK2-NEXT: [[DOTOFFLOAD_PTRS8:%.*]] = alloca [1 x i8*], align 8 788 // CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS9:%.*]] = alloca [1 x i8*], align 8 789 // CHECK2-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 790 // CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 791 // CHECK2-NEXT: store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4 792 // CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 793 // CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP1]], 32 794 // CHECK2-NEXT: store i32 [[MUL]], i32* [[DOTCAPTURE_EXPR_1]], align 4 795 // CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 796 // CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* 797 // CHECK2-NEXT: store i32 [[TMP2]], i32* [[CONV]], align 4 798 // CHECK2-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 799 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 800 // CHECK2-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED2]] to i32* 801 // CHECK2-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 802 // CHECK2-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED2]], align 8 803 // 
CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 804 // CHECK2-NEXT: [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i64* 805 // CHECK2-NEXT: store i64 [[TMP3]], i64* [[TMP7]], align 8 806 // CHECK2-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 807 // CHECK2-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* 808 // CHECK2-NEXT: store i64 [[TMP3]], i64* [[TMP9]], align 8 809 // CHECK2-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 810 // CHECK2-NEXT: store i8* null, i8** [[TMP10]], align 8 811 // CHECK2-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 812 // CHECK2-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* 813 // CHECK2-NEXT: store i64 [[TMP5]], i64* [[TMP12]], align 8 814 // CHECK2-NEXT: [[TMP13:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 815 // CHECK2-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* 816 // CHECK2-NEXT: store i64 [[TMP5]], i64* [[TMP14]], align 8 817 // CHECK2-NEXT: [[TMP15:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 818 // CHECK2-NEXT: store i8* null, i8** [[TMP15]], align 8 819 // CHECK2-NEXT: [[TMP16:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 820 // CHECK2-NEXT: [[TMP17:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 821 // CHECK2-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 822 // CHECK2-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 823 // CHECK2-NEXT: [[TMP20:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l104.region_id, i32 2, i8** [[TMP16]], i8** [[TMP17]], i64* getelementptr inbounds ([2 x i64], [2 x 
i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 [[TMP18]], i32 [[TMP19]]) 824 // CHECK2-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0 825 // CHECK2-NEXT: br i1 [[TMP21]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 826 // CHECK2: omp_offload.failed: 827 // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l104(i64 [[TMP3]], i64 [[TMP5]]) #[[ATTR2]] 828 // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT]] 829 // CHECK2: omp_offload.cont: 830 // CHECK2-NEXT: [[TMP22:%.*]] = load i32, i32* [[N_ADDR]], align 4 831 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 32, [[TMP22]] 832 // CHECK2-NEXT: store i32 [[ADD]], i32* [[DOTCAPTURE_EXPR_4]], align 4 833 // CHECK2-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4 834 // CHECK2-NEXT: [[CONV6:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED5]] to i32* 835 // CHECK2-NEXT: store i32 [[TMP23]], i32* [[CONV6]], align 4 836 // CHECK2-NEXT: [[TMP24:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED5]], align 8 837 // CHECK2-NEXT: [[TMP25:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 838 // CHECK2-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i64* 839 // CHECK2-NEXT: store i64 [[TMP24]], i64* [[TMP26]], align 8 840 // CHECK2-NEXT: [[TMP27:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 841 // CHECK2-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64* 842 // CHECK2-NEXT: store i64 [[TMP24]], i64* [[TMP28]], align 8 843 // CHECK2-NEXT: [[TMP29:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 0 844 // CHECK2-NEXT: store i8* null, i8** [[TMP29]], align 8 845 // CHECK2-NEXT: [[TMP30:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 846 // CHECK2-NEXT: [[TMP31:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* 
[[DOTOFFLOAD_PTRS8]], i32 0, i32 0 847 // CHECK2-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4 848 // CHECK2-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l108.region_id, i32 1, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 [[TMP32]]) 849 // CHECK2-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 850 // CHECK2-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED10:%.*]], label [[OMP_OFFLOAD_CONT11:%.*]] 851 // CHECK2: omp_offload.failed10: 852 // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l108(i64 [[TMP24]]) #[[ATTR2]] 853 // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT11]] 854 // CHECK2: omp_offload.cont11: 855 // CHECK2-NEXT: [[TMP35:%.*]] = load i32, i32* [[N_ADDR]], align 4 856 // CHECK2-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP35]], 1 857 // CHECK2-NEXT: ret i32 [[ADD12]] 858 // 859 // 860 // CHECK2-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i 861 // CHECK2-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] comdat { 862 // CHECK2-NEXT: entry: 863 // CHECK2-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 864 // CHECK2-NEXT: [[A:%.*]] = alloca i32, align 4 865 // CHECK2-NEXT: [[B:%.*]] = alloca i16, align 2 866 // CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i16, align 2 867 // CHECK2-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 868 // CHECK2-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8 869 // CHECK2-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 870 // CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 871 // CHECK2-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 872 // CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 873 // CHECK2-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 
4 874 // CHECK2-NEXT: store i32 0, i32* [[A]], align 4 875 // CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l88.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 20) 876 // CHECK2-NEXT: [[TMP1:%.*]] = icmp ne i32 [[TMP0]], 0 877 // CHECK2-NEXT: br i1 [[TMP1]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 878 // CHECK2: omp_offload.failed: 879 // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l88() #[[ATTR2]] 880 // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT]] 881 // CHECK2: omp_offload.cont: 882 // CHECK2-NEXT: store i16 1, i16* [[B]], align 2 883 // CHECK2-NEXT: [[TMP2:%.*]] = load i16, i16* [[B]], align 2 884 // CHECK2-NEXT: store i16 [[TMP2]], i16* [[DOTCAPTURE_EXPR_]], align 2 885 // CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 886 // CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32* 887 // CHECK2-NEXT: store i32 [[TMP3]], i32* [[CONV]], align 4 888 // CHECK2-NEXT: [[TMP4:%.*]] = load i64, i64* [[A_CASTED]], align 8 889 // CHECK2-NEXT: [[TMP5:%.*]] = load i16, i16* [[B]], align 2 890 // CHECK2-NEXT: [[CONV1:%.*]] = bitcast i64* [[B_CASTED]] to i16* 891 // CHECK2-NEXT: store i16 [[TMP5]], i16* [[CONV1]], align 2 892 // CHECK2-NEXT: [[TMP6:%.*]] = load i64, i64* [[B_CASTED]], align 8 893 // CHECK2-NEXT: [[TMP7:%.*]] = load i16, i16* [[DOTCAPTURE_EXPR_]], align 2 894 // CHECK2-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i16* 895 // CHECK2-NEXT: store i16 [[TMP7]], i16* [[CONV2]], align 2 896 // CHECK2-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 897 // CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 898 // CHECK2-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i64* 899 // CHECK2-NEXT: store i64 [[TMP4]], i64* 
[[TMP10]], align 8 900 // CHECK2-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 901 // CHECK2-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* 902 // CHECK2-NEXT: store i64 [[TMP4]], i64* [[TMP12]], align 8 903 // CHECK2-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 904 // CHECK2-NEXT: store i8* null, i8** [[TMP13]], align 8 905 // CHECK2-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 906 // CHECK2-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* 907 // CHECK2-NEXT: store i64 [[TMP6]], i64* [[TMP15]], align 8 908 // CHECK2-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 909 // CHECK2-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64* 910 // CHECK2-NEXT: store i64 [[TMP6]], i64* [[TMP17]], align 8 911 // CHECK2-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 912 // CHECK2-NEXT: store i8* null, i8** [[TMP18]], align 8 913 // CHECK2-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 914 // CHECK2-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64* 915 // CHECK2-NEXT: store i64 [[TMP8]], i64* [[TMP20]], align 8 916 // CHECK2-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 917 // CHECK2-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i64* 918 // CHECK2-NEXT: store i64 [[TMP8]], i64* [[TMP22]], align 8 919 // CHECK2-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 920 // CHECK2-NEXT: store i8* null, i8** [[TMP23]], align 8 921 // CHECK2-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 922 // CHECK2-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* 
[[DOTOFFLOAD_PTRS]], i32 0, i32 0 923 // CHECK2-NEXT: [[TMP26:%.*]] = load i16, i16* [[DOTCAPTURE_EXPR_]], align 2 924 // CHECK2-NEXT: [[TMP27:%.*]] = sext i16 [[TMP26]] to i32 925 // CHECK2-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l93.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 [[TMP27]], i32 1024) 926 // CHECK2-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 927 // CHECK2-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] 928 // CHECK2: omp_offload.failed3: 929 // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l93(i64 [[TMP4]], i64 [[TMP6]], i64 [[TMP8]]) #[[ATTR2]] 930 // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT4]] 931 // CHECK2: omp_offload.cont4: 932 // CHECK2-NEXT: [[TMP30:%.*]] = load i32, i32* [[A]], align 4 933 // CHECK2-NEXT: ret i32 [[TMP30]] 934 // 935 // 936 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l121 937 // CHECK2-SAME: (%struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1:[0-9]+]] { 938 // CHECK2-NEXT: entry: 939 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 940 // CHECK2-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8 941 // CHECK2-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 942 // CHECK2-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8 943 // CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 944 // CHECK2-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 945 // CHECK2-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8 946 // CHECK2-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], 
align 8 947 // CHECK2-NEXT: [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 948 // CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32* 949 // CHECK2-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32* 950 // CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV1]], align 8 951 // CHECK2-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 [[TMP2]]) 952 // CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8 953 // CHECK2-NEXT: [[CONV2:%.*]] = bitcast i64* [[B_CASTED]] to i32* 954 // CHECK2-NEXT: store i32 [[TMP3]], i32* [[CONV2]], align 4 955 // CHECK2-NEXT: [[TMP4:%.*]] = load i64, i64* [[B_CASTED]], align 8 956 // CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]], i64 [[TMP4]]) 957 // CHECK2-NEXT: ret void 958 // 959 // 960 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined. 
961 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i64 [[B:%.*]]) #[[ATTR1]] { 962 // CHECK2-NEXT: entry: 963 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 964 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 965 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 966 // CHECK2-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8 967 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 968 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 969 // CHECK2-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 970 // CHECK2-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8 971 // CHECK2-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 972 // CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32* 973 // CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8 974 // CHECK2-NEXT: [[CONV1:%.*]] = sitofp i32 [[TMP1]] to double 975 // CHECK2-NEXT: [[ADD:%.*]] = fadd double [[CONV1]], 1.500000e+00 976 // CHECK2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0 977 // CHECK2-NEXT: store double [[ADD]], double* [[A]], align 8 978 // CHECK2-NEXT: ret void 979 // 980 // 981 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l126 982 // CHECK2-SAME: (%struct.S1* [[THIS:%.*]]) #[[ATTR1]] { 983 // CHECK2-NEXT: entry: 984 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 985 // CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 986 // CHECK2-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 987 // CHECK2-NEXT: [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 988 // CHECK2-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 1024) 989 // CHECK2-NEXT: 
call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]]) 990 // CHECK2-NEXT: ret void 991 // 992 // 993 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..1 994 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]]) #[[ATTR1]] { 995 // CHECK2-NEXT: entry: 996 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 997 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 998 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 999 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 1000 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 1001 // CHECK2-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 1002 // CHECK2-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 1003 // CHECK2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0 1004 // CHECK2-NEXT: store double 2.500000e+00, double* [[A]], align 8 1005 // CHECK2-NEXT: ret void 1006 // 1007 // 1008 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l104 1009 // CHECK2-SAME: (i64 [[DOTCAPTURE_EXPR_:%.*]], i64 [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR1]] { 1010 // CHECK2-NEXT: entry: 1011 // CHECK2-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 1012 // CHECK2-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca i64, align 8 1013 // CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 1014 // CHECK2-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 1015 // CHECK2-NEXT: store i64 [[DOTCAPTURE_EXPR_1]], i64* [[DOTCAPTURE_EXPR__ADDR2]], align 8 1016 // CHECK2-NEXT: [[CONV:%.*]] = 
bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32* 1017 // CHECK2-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR2]] to i32* 1018 // CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8 1019 // CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV3]], align 8 1020 // CHECK2-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]]) 1021 // CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*)) 1022 // CHECK2-NEXT: ret void 1023 // 1024 // 1025 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..4 1026 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 1027 // CHECK2-NEXT: entry: 1028 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 1029 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 1030 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 1031 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 1032 // CHECK2-NEXT: ret void 1033 // 1034 // 1035 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l108 1036 // CHECK2-SAME: (i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] { 1037 // CHECK2-NEXT: entry: 1038 // CHECK2-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 1039 // CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 1040 // CHECK2-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 1041 // CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32* 1042 // CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8 1043 // CHECK2-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 [[TMP1]]) 1044 // CHECK2-NEXT: call void 
(%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..7 to void (i32*, i32*, ...)*)) 1045 // CHECK2-NEXT: ret void 1046 // 1047 // 1048 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..7 1049 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 1050 // CHECK2-NEXT: entry: 1051 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 1052 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 1053 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 1054 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 1055 // CHECK2-NEXT: ret void 1056 // 1057 // 1058 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l88 1059 // CHECK2-SAME: () #[[ATTR1]] { 1060 // CHECK2-NEXT: entry: 1061 // CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 1062 // CHECK2-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 20) 1063 // CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*)) 1064 // CHECK2-NEXT: ret void 1065 // 1066 // 1067 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..10 1068 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 1069 // CHECK2-NEXT: entry: 1070 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 1071 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 1072 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 1073 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 1074 // CHECK2-NEXT: ret void 1075 // 1076 // 1077 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l93 1078 // CHECK2-SAME: (i64 [[A:%.*]], i64 [[B:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] { 1079 // CHECK2-NEXT: entry: 1080 // CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 1081 // CHECK2-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8 1082 // CHECK2-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 1083 // CHECK2-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 1084 // CHECK2-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8 1085 // CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 1086 // CHECK2-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 1087 // CHECK2-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8 1088 // CHECK2-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 1089 // CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 1090 // CHECK2-NEXT: [[CONV1:%.*]] = bitcast i64* [[B_ADDR]] to i16* 1091 // CHECK2-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i16* 1092 // CHECK2-NEXT: [[TMP1:%.*]] = load i16, i16* [[CONV2]], align 8 1093 // CHECK2-NEXT: [[TMP2:%.*]] = sext i16 [[TMP1]] to i32 1094 // CHECK2-NEXT: call void 
@__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 1024) 1095 // CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8 1096 // CHECK2-NEXT: [[CONV3:%.*]] = bitcast i64* [[A_CASTED]] to i32* 1097 // CHECK2-NEXT: store i32 [[TMP3]], i32* [[CONV3]], align 4 1098 // CHECK2-NEXT: [[TMP4:%.*]] = load i64, i64* [[A_CASTED]], align 8 1099 // CHECK2-NEXT: [[TMP5:%.*]] = load i16, i16* [[CONV1]], align 8 1100 // CHECK2-NEXT: [[CONV4:%.*]] = bitcast i64* [[B_CASTED]] to i16* 1101 // CHECK2-NEXT: store i16 [[TMP5]], i16* [[CONV4]], align 2 1102 // CHECK2-NEXT: [[TMP6:%.*]] = load i64, i64* [[B_CASTED]], align 8 1103 // CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP4]], i64 [[TMP6]]) 1104 // CHECK2-NEXT: ret void 1105 // 1106 // 1107 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..11 1108 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR1]] { 1109 // CHECK2-NEXT: entry: 1110 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 1111 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 1112 // CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 1113 // CHECK2-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8 1114 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 1115 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 1116 // CHECK2-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 1117 // CHECK2-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8 1118 // CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 1119 // CHECK2-NEXT: [[CONV1:%.*]] = bitcast i64* [[B_ADDR]] to i16* 1120 // CHECK2-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV1]], align 8 1121 // CHECK2-NEXT: 
[[CONV2:%.*]] = sext i16 [[TMP0]] to i32 1122 // CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8 1123 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CONV2]] 1124 // CHECK2-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8 1125 // CHECK2-NEXT: ret void 1126 // 1127 // 1128 // CHECK2-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg 1129 // CHECK2-SAME: () #[[ATTR3:[0-9]+]] { 1130 // CHECK2-NEXT: entry: 1131 // CHECK2-NEXT: call void @__tgt_register_requires(i64 1) 1132 // CHECK2-NEXT: ret void 1133 // 1134 // 1135 // CHECK3-LABEL: define {{[^@]+}}@_Z3bari 1136 // CHECK3-SAME: (i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] { 1137 // CHECK3-NEXT: entry: 1138 // CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 1139 // CHECK3-NEXT: [[A:%.*]] = alloca i32, align 4 1140 // CHECK3-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4 1141 // CHECK3-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 1142 // CHECK3-NEXT: store i32 0, i32* [[A]], align 4 1143 // CHECK3-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 1144 // CHECK3-NEXT: [[CALL:%.*]] = call i32 @_ZN2S12r1Ei(%struct.S1* nonnull dereferenceable(8) [[S]], i32 [[TMP0]]) 1145 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4 1146 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] 1147 // CHECK3-NEXT: store i32 [[ADD]], i32* [[A]], align 4 1148 // CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 1149 // CHECK3-NEXT: [[CALL1:%.*]] = call i32 @_ZL7fstatici(i32 [[TMP2]]) 1150 // CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 1151 // CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] 1152 // CHECK3-NEXT: store i32 [[ADD2]], i32* [[A]], align 4 1153 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 1154 // CHECK3-NEXT: [[CALL3:%.*]] = call i32 @_Z9ftemplateIiET_i(i32 [[TMP4]]) 1155 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4 1156 // CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] 1157 // CHECK3-NEXT: store i32 
[[ADD4]], i32* [[A]], align 4 1158 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[A]], align 4 1159 // CHECK3-NEXT: ret i32 [[TMP6]] 1160 // 1161 // 1162 // CHECK3-LABEL: define {{[^@]+}}@_ZN2S12r1Ei 1163 // CHECK3-SAME: (%struct.S1* nonnull dereferenceable(8) [[THIS:%.*]], i32 [[N:%.*]]) #[[ATTR0]] comdat align 2 { 1164 // CHECK3-NEXT: entry: 1165 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 1166 // CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 1167 // CHECK3-NEXT: [[B:%.*]] = alloca i32, align 4 1168 // CHECK3-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 1169 // CHECK3-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4 1170 // CHECK3-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 1171 // CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 1172 // CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 1173 // CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 1174 // CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [1 x i8*], align 4 1175 // CHECK3-NEXT: [[DOTOFFLOAD_PTRS4:%.*]] = alloca [1 x i8*], align 4 1176 // CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [1 x i8*], align 4 1177 // CHECK3-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 1178 // CHECK3-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 1179 // CHECK3-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 1180 // CHECK3-NEXT: store i32 1, i32* [[B]], align 4 1181 // CHECK3-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 1182 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[B]], align 4 1183 // CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP0]], [[TMP1]] 1184 // CHECK3-NEXT: store i32 [[SUB]], i32* [[DOTCAPTURE_EXPR_]], align 4 1185 // CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[B]], align 4 1186 // CHECK3-NEXT: store i32 [[TMP2]], i32* [[B_CASTED]], align 4 1187 // CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[B_CASTED]], align 4 1188 // CHECK3-NEXT: [[TMP4:%.*]] = 
load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 1189 // CHECK3-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 1190 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 1191 // CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0 1192 // CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 1193 // CHECK3-NEXT: [[TMP7:%.*]] = bitcast i8** [[TMP6]] to %struct.S1** 1194 // CHECK3-NEXT: store %struct.S1* [[THIS1]], %struct.S1** [[TMP7]], align 4 1195 // CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 1196 // CHECK3-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to double** 1197 // CHECK3-NEXT: store double* [[A]], double** [[TMP9]], align 4 1198 // CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 1199 // CHECK3-NEXT: store i8* null, i8** [[TMP10]], align 4 1200 // CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 1201 // CHECK3-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* 1202 // CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP12]], align 4 1203 // CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 1204 // CHECK3-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* 1205 // CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP14]], align 4 1206 // CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 1207 // CHECK3-NEXT: store i8* null, i8** [[TMP15]], align 4 1208 // CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 1209 // CHECK3-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32* 1210 // CHECK3-NEXT: store i32 [[TMP5]], i32* [[TMP17]], align 4 1211 // CHECK3-NEXT: 
[[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 1212 // CHECK3-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* 1213 // CHECK3-NEXT: store i32 [[TMP5]], i32* [[TMP19]], align 4 1214 // CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 1215 // CHECK3-NEXT: store i8* null, i8** [[TMP20]], align 4 1216 // CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 1217 // CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 1218 // CHECK3-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 1219 // CHECK3-NEXT: [[TMP24:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l121.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 [[TMP23]]) 1220 // CHECK3-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0 1221 // CHECK3-NEXT: br i1 [[TMP25]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 1222 // CHECK3: omp_offload.failed: 1223 // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l121(%struct.S1* [[THIS1]], i32 [[TMP3]], i32 [[TMP5]]) #[[ATTR2:[0-9]+]] 1224 // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] 1225 // CHECK3: omp_offload.cont: 1226 // CHECK3-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 1227 // CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0 1228 // CHECK3-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to %struct.S1** 1229 // CHECK3-NEXT: store %struct.S1* [[THIS1]], %struct.S1** [[TMP27]], align 
4 1230 // CHECK3-NEXT: [[TMP28:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0 1231 // CHECK3-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to double** 1232 // CHECK3-NEXT: store double* [[A2]], double** [[TMP29]], align 4 1233 // CHECK3-NEXT: [[TMP30:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS5]], i32 0, i32 0 1234 // CHECK3-NEXT: store i8* null, i8** [[TMP30]], align 4 1235 // CHECK3-NEXT: [[TMP31:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0 1236 // CHECK3-NEXT: [[TMP32:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0 1237 // CHECK3-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l126.region_id, i32 1, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1024) 1238 // CHECK3-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 1239 // CHECK3-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED6:%.*]], label [[OMP_OFFLOAD_CONT7:%.*]] 1240 // CHECK3: omp_offload.failed6: 1241 // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l126(%struct.S1* [[THIS1]]) #[[ATTR2]] 1242 // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT7]] 1243 // CHECK3: omp_offload.cont7: 1244 // CHECK3-NEXT: [[A8:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 1245 // CHECK3-NEXT: [[TMP35:%.*]] = load double, double* [[A8]], align 4 1246 // CHECK3-NEXT: [[CONV:%.*]] = fptosi double [[TMP35]] to i32 1247 // CHECK3-NEXT: ret i32 [[CONV]] 1248 // 1249 // 1250 // CHECK3-LABEL: define {{[^@]+}}@_ZL7fstatici 1251 // CHECK3-SAME: (i32 [[N:%.*]]) #[[ATTR0]] { 1252 // CHECK3-NEXT: entry: 1253 // CHECK3-NEXT: 
[[N_ADDR:%.*]] = alloca i32, align 4 1254 // CHECK3-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 1255 // CHECK3-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 1256 // CHECK3-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 1257 // CHECK3-NEXT: [[DOTCAPTURE_EXPR__CASTED2:%.*]] = alloca i32, align 4 1258 // CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x i8*], align 4 1259 // CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x i8*], align 4 1260 // CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x i8*], align 4 1261 // CHECK3-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4 1262 // CHECK3-NEXT: [[DOTCAPTURE_EXPR__CASTED4:%.*]] = alloca i32, align 4 1263 // CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [1 x i8*], align 4 1264 // CHECK3-NEXT: [[DOTOFFLOAD_PTRS6:%.*]] = alloca [1 x i8*], align 4 1265 // CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [1 x i8*], align 4 1266 // CHECK3-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 1267 // CHECK3-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 1268 // CHECK3-NEXT: store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4 1269 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 1270 // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP1]], 32 1271 // CHECK3-NEXT: store i32 [[MUL]], i32* [[DOTCAPTURE_EXPR_1]], align 4 1272 // CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 1273 // CHECK3-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 1274 // CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 1275 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 1276 // CHECK3-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR__CASTED2]], align 4 1277 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED2]], align 4 1278 // CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 1279 // CHECK3-NEXT: 
[[TMP7:%.*]] = bitcast i8** [[TMP6]] to i32* 1280 // CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP7]], align 4 1281 // CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 1282 // CHECK3-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32* 1283 // CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4 1284 // CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 1285 // CHECK3-NEXT: store i8* null, i8** [[TMP10]], align 4 1286 // CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 1287 // CHECK3-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* 1288 // CHECK3-NEXT: store i32 [[TMP5]], i32* [[TMP12]], align 4 1289 // CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 1290 // CHECK3-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* 1291 // CHECK3-NEXT: store i32 [[TMP5]], i32* [[TMP14]], align 4 1292 // CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 1293 // CHECK3-NEXT: store i8* null, i8** [[TMP15]], align 4 1294 // CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 1295 // CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 1296 // CHECK3-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 1297 // CHECK3-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 1298 // CHECK3-NEXT: [[TMP20:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l104.region_id, i32 2, i8** [[TMP16]], i8** [[TMP17]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, 
i32 0, i32 0), i8** null, i8** null, i32 [[TMP18]], i32 [[TMP19]]) 1299 // CHECK3-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0 1300 // CHECK3-NEXT: br i1 [[TMP21]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 1301 // CHECK3: omp_offload.failed: 1302 // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l104(i32 [[TMP3]], i32 [[TMP5]]) #[[ATTR2]] 1303 // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] 1304 // CHECK3: omp_offload.cont: 1305 // CHECK3-NEXT: [[TMP22:%.*]] = load i32, i32* [[N_ADDR]], align 4 1306 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 32, [[TMP22]] 1307 // CHECK3-NEXT: store i32 [[ADD]], i32* [[DOTCAPTURE_EXPR_3]], align 4 1308 // CHECK3-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4 1309 // CHECK3-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR__CASTED4]], align 4 1310 // CHECK3-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED4]], align 4 1311 // CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 1312 // CHECK3-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* 1313 // CHECK3-NEXT: store i32 [[TMP24]], i32* [[TMP26]], align 4 1314 // CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 1315 // CHECK3-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32* 1316 // CHECK3-NEXT: store i32 [[TMP24]], i32* [[TMP28]], align 4 1317 // CHECK3-NEXT: [[TMP29:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i32 0, i32 0 1318 // CHECK3-NEXT: store i8* null, i8** [[TMP29]], align 4 1319 // CHECK3-NEXT: [[TMP30:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 1320 // CHECK3-NEXT: [[TMP31:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 1321 // CHECK3-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4 1322 // CHECK3-NEXT: [[TMP33:%.*]] = call 
i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l108.region_id, i32 1, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 [[TMP32]]) 1323 // CHECK3-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 1324 // CHECK3-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED8:%.*]], label [[OMP_OFFLOAD_CONT9:%.*]] 1325 // CHECK3: omp_offload.failed8: 1326 // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l108(i32 [[TMP24]]) #[[ATTR2]] 1327 // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT9]] 1328 // CHECK3: omp_offload.cont9: 1329 // CHECK3-NEXT: [[TMP35:%.*]] = load i32, i32* [[N_ADDR]], align 4 1330 // CHECK3-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP35]], 1 1331 // CHECK3-NEXT: ret i32 [[ADD10]] 1332 // 1333 // 1334 // CHECK3-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i 1335 // CHECK3-SAME: (i32 [[N:%.*]]) #[[ATTR0]] comdat { 1336 // CHECK3-NEXT: entry: 1337 // CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 1338 // CHECK3-NEXT: [[A:%.*]] = alloca i32, align 4 1339 // CHECK3-NEXT: [[B:%.*]] = alloca i16, align 2 1340 // CHECK3-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i16, align 2 1341 // CHECK3-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 1342 // CHECK3-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4 1343 // CHECK3-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 1344 // CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 1345 // CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 1346 // CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 1347 // CHECK3-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 1348 // CHECK3-NEXT: store i32 0, i32* [[A]], align 4 1349 // CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* 
@[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l88.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 20) 1350 // CHECK3-NEXT: [[TMP1:%.*]] = icmp ne i32 [[TMP0]], 0 1351 // CHECK3-NEXT: br i1 [[TMP1]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 1352 // CHECK3: omp_offload.failed: 1353 // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l88() #[[ATTR2]] 1354 // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] 1355 // CHECK3: omp_offload.cont: 1356 // CHECK3-NEXT: store i16 1, i16* [[B]], align 2 1357 // CHECK3-NEXT: [[TMP2:%.*]] = load i16, i16* [[B]], align 2 1358 // CHECK3-NEXT: store i16 [[TMP2]], i16* [[DOTCAPTURE_EXPR_]], align 2 1359 // CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 1360 // CHECK3-NEXT: store i32 [[TMP3]], i32* [[A_CASTED]], align 4 1361 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[A_CASTED]], align 4 1362 // CHECK3-NEXT: [[TMP5:%.*]] = load i16, i16* [[B]], align 2 1363 // CHECK3-NEXT: [[CONV:%.*]] = bitcast i32* [[B_CASTED]] to i16* 1364 // CHECK3-NEXT: store i16 [[TMP5]], i16* [[CONV]], align 2 1365 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[B_CASTED]], align 4 1366 // CHECK3-NEXT: [[TMP7:%.*]] = load i16, i16* [[DOTCAPTURE_EXPR_]], align 2 1367 // CHECK3-NEXT: [[CONV1:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__CASTED]] to i16* 1368 // CHECK3-NEXT: store i16 [[TMP7]], i16* [[CONV1]], align 2 1369 // CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 1370 // CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 1371 // CHECK3-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i32* 1372 // CHECK3-NEXT: store i32 [[TMP4]], i32* [[TMP10]], align 4 1373 // CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 1374 // CHECK3-NEXT: [[TMP12:%.*]] = bitcast i8** 
[[TMP11]] to i32* 1375 // CHECK3-NEXT: store i32 [[TMP4]], i32* [[TMP12]], align 4 1376 // CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 1377 // CHECK3-NEXT: store i8* null, i8** [[TMP13]], align 4 1378 // CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 1379 // CHECK3-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* 1380 // CHECK3-NEXT: store i32 [[TMP6]], i32* [[TMP15]], align 4 1381 // CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 1382 // CHECK3-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32* 1383 // CHECK3-NEXT: store i32 [[TMP6]], i32* [[TMP17]], align 4 1384 // CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 1385 // CHECK3-NEXT: store i8* null, i8** [[TMP18]], align 4 1386 // CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 1387 // CHECK3-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32* 1388 // CHECK3-NEXT: store i32 [[TMP8]], i32* [[TMP20]], align 4 1389 // CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 1390 // CHECK3-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i32* 1391 // CHECK3-NEXT: store i32 [[TMP8]], i32* [[TMP22]], align 4 1392 // CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 1393 // CHECK3-NEXT: store i8* null, i8** [[TMP23]], align 4 1394 // CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 1395 // CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 1396 // CHECK3-NEXT: [[TMP26:%.*]] = load i16, i16* [[DOTCAPTURE_EXPR_]], align 2 1397 // CHECK3-NEXT: [[TMP27:%.*]] = sext i16 
[[TMP26]] to i32 1398 // CHECK3-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l93.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 [[TMP27]], i32 1024) 1399 // CHECK3-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 1400 // CHECK3-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED2:%.*]], label [[OMP_OFFLOAD_CONT3:%.*]] 1401 // CHECK3: omp_offload.failed2: 1402 // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l93(i32 [[TMP4]], i32 [[TMP6]], i32 [[TMP8]]) #[[ATTR2]] 1403 // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT3]] 1404 // CHECK3: omp_offload.cont3: 1405 // CHECK3-NEXT: [[TMP30:%.*]] = load i32, i32* [[A]], align 4 1406 // CHECK3-NEXT: ret i32 [[TMP30]] 1407 // 1408 // 1409 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l121 1410 // CHECK3-SAME: (%struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1:[0-9]+]] { 1411 // CHECK3-NEXT: entry: 1412 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 1413 // CHECK3-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 1414 // CHECK3-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 1415 // CHECK3-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4 1416 // CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 1417 // CHECK3-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 1418 // CHECK3-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 1419 // CHECK3-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 1420 // CHECK3-NEXT: [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 1421 // CHECK3-NEXT: [[TMP2:%.*]] = 
load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 1422 // CHECK3-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 [[TMP2]]) 1423 // CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[B_ADDR]], align 4 1424 // CHECK3-NEXT: store i32 [[TMP3]], i32* [[B_CASTED]], align 4 1425 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_CASTED]], align 4 1426 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]], i32 [[TMP4]]) 1427 // CHECK3-NEXT: ret void 1428 // 1429 // 1430 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined. 1431 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i32 [[B:%.*]]) #[[ATTR1]] { 1432 // CHECK3-NEXT: entry: 1433 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 1434 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 1435 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 1436 // CHECK3-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 1437 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 1438 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 1439 // CHECK3-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 1440 // CHECK3-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 1441 // CHECK3-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 1442 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[B_ADDR]], align 4 1443 // CHECK3-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP1]] to double 1444 // CHECK3-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 1445 // CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0 1446 // CHECK3-NEXT: store double [[ADD]], 
double* [[A]], align 4 1447 // CHECK3-NEXT: ret void 1448 // 1449 // 1450 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l126 1451 // CHECK3-SAME: (%struct.S1* [[THIS:%.*]]) #[[ATTR1]] { 1452 // CHECK3-NEXT: entry: 1453 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 1454 // CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 1455 // CHECK3-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 1456 // CHECK3-NEXT: [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 1457 // CHECK3-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 1024) 1458 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]]) 1459 // CHECK3-NEXT: ret void 1460 // 1461 // 1462 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..1 1463 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]]) #[[ATTR1]] { 1464 // CHECK3-NEXT: entry: 1465 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 1466 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 1467 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 1468 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 1469 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 1470 // CHECK3-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 1471 // CHECK3-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 1472 // CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0 1473 // CHECK3-NEXT: store double 2.500000e+00, double* [[A]], align 4 
1474 // CHECK3-NEXT: ret void 1475 // 1476 // 1477 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l104 1478 // CHECK3-SAME: (i32 [[DOTCAPTURE_EXPR_:%.*]], i32 [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR1]] { 1479 // CHECK3-NEXT: entry: 1480 // CHECK3-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 1481 // CHECK3-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca i32, align 4 1482 // CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 1483 // CHECK3-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 1484 // CHECK3-NEXT: store i32 [[DOTCAPTURE_EXPR_1]], i32* [[DOTCAPTURE_EXPR__ADDR2]], align 4 1485 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 1486 // CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR2]], align 4 1487 // CHECK3-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]]) 1488 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*)) 1489 // CHECK3-NEXT: ret void 1490 // 1491 // 1492 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..4 1493 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 1494 // CHECK3-NEXT: entry: 1495 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 1496 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 1497 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 1498 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 1499 // CHECK3-NEXT: ret void 1500 // 1501 // 1502 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l108 1503 // CHECK3-SAME: (i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] { 1504 // CHECK3-NEXT: entry: 1505 // CHECK3-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 1506 // CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 1507 // CHECK3-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 1508 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 1509 // CHECK3-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 [[TMP1]]) 1510 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..7 to void (i32*, i32*, ...)*)) 1511 // CHECK3-NEXT: ret void 1512 // 1513 // 1514 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..7 1515 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 1516 // CHECK3-NEXT: entry: 1517 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 1518 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 1519 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 1520 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 1521 // CHECK3-NEXT: ret void 1522 // 1523 // 1524 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l88 1525 // CHECK3-SAME: () #[[ATTR1]] { 1526 // CHECK3-NEXT: entry: 1527 // CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 1528 // CHECK3-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 20) 1529 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*)) 1530 // CHECK3-NEXT: ret void 1531 // 1532 // 1533 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..10 1534 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 1535 // CHECK3-NEXT: entry: 1536 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 1537 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 1538 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 1539 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 1540 // CHECK3-NEXT: ret void 1541 // 1542 // 1543 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l93 1544 // CHECK3-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] { 1545 // CHECK3-NEXT: entry: 1546 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 1547 // CHECK3-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 1548 // CHECK3-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 1549 // CHECK3-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 1550 // CHECK3-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4 1551 // CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 1552 // CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 1553 // CHECK3-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 1554 // CHECK3-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 1555 // CHECK3-NEXT: [[CONV:%.*]] = bitcast i32* [[B_ADDR]] to i16* 1556 // CHECK3-NEXT: [[CONV1:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__ADDR]] to i16* 1557 // CHECK3-NEXT: [[TMP1:%.*]] = load i16, i16* [[CONV1]], align 4 1558 // CHECK3-NEXT: [[TMP2:%.*]] = sext i16 [[TMP1]] to i32 1559 // CHECK3-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], 
i32 [[TMP2]], i32 1024) 1560 // CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[A_ADDR]], align 4 1561 // CHECK3-NEXT: store i32 [[TMP3]], i32* [[A_CASTED]], align 4 1562 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[A_CASTED]], align 4 1563 // CHECK3-NEXT: [[TMP5:%.*]] = load i16, i16* [[CONV]], align 4 1564 // CHECK3-NEXT: [[CONV2:%.*]] = bitcast i32* [[B_CASTED]] to i16* 1565 // CHECK3-NEXT: store i16 [[TMP5]], i16* [[CONV2]], align 2 1566 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[B_CASTED]], align 4 1567 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP4]], i32 [[TMP6]]) 1568 // CHECK3-NEXT: ret void 1569 // 1570 // 1571 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..11 1572 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[B:%.*]]) #[[ATTR1]] { 1573 // CHECK3-NEXT: entry: 1574 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 1575 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 1576 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 1577 // CHECK3-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 1578 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 1579 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 1580 // CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 1581 // CHECK3-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 1582 // CHECK3-NEXT: [[CONV:%.*]] = bitcast i32* [[B_ADDR]] to i16* 1583 // CHECK3-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 4 1584 // CHECK3-NEXT: [[CONV1:%.*]] = sext i16 [[TMP0]] to i32 1585 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4 1586 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CONV1]] 1587 // CHECK3-NEXT: store i32 
[[ADD]], i32* [[A_ADDR]], align 4 1588 // CHECK3-NEXT: ret void 1589 // 1590 // 1591 // CHECK3-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg 1592 // CHECK3-SAME: () #[[ATTR3:[0-9]+]] { 1593 // CHECK3-NEXT: entry: 1594 // CHECK3-NEXT: call void @__tgt_register_requires(i64 1) 1595 // CHECK3-NEXT: ret void 1596 // 1597 // 1598 // CHECK4-LABEL: define {{[^@]+}}@_Z3bari 1599 // CHECK4-SAME: (i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] { 1600 // CHECK4-NEXT: entry: 1601 // CHECK4-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 1602 // CHECK4-NEXT: [[A:%.*]] = alloca i32, align 4 1603 // CHECK4-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4 1604 // CHECK4-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 1605 // CHECK4-NEXT: store i32 0, i32* [[A]], align 4 1606 // CHECK4-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 1607 // CHECK4-NEXT: [[CALL:%.*]] = call i32 @_ZN2S12r1Ei(%struct.S1* nonnull dereferenceable(8) [[S]], i32 [[TMP0]]) 1608 // CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4 1609 // CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] 1610 // CHECK4-NEXT: store i32 [[ADD]], i32* [[A]], align 4 1611 // CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 1612 // CHECK4-NEXT: [[CALL1:%.*]] = call i32 @_ZL7fstatici(i32 [[TMP2]]) 1613 // CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 1614 // CHECK4-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] 1615 // CHECK4-NEXT: store i32 [[ADD2]], i32* [[A]], align 4 1616 // CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 1617 // CHECK4-NEXT: [[CALL3:%.*]] = call i32 @_Z9ftemplateIiET_i(i32 [[TMP4]]) 1618 // CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4 1619 // CHECK4-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] 1620 // CHECK4-NEXT: store i32 [[ADD4]], i32* [[A]], align 4 1621 // CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[A]], align 4 1622 // CHECK4-NEXT: ret i32 [[TMP6]] 1623 // 1624 // 1625 // CHECK4-LABEL: define {{[^@]+}}@_ZN2S12r1Ei 1626 
// CHECK4-SAME: (%struct.S1* nonnull dereferenceable(8) [[THIS:%.*]], i32 [[N:%.*]]) #[[ATTR0]] comdat align 2 { 1627 // CHECK4-NEXT: entry: 1628 // CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 1629 // CHECK4-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 1630 // CHECK4-NEXT: [[B:%.*]] = alloca i32, align 4 1631 // CHECK4-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 1632 // CHECK4-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4 1633 // CHECK4-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 1634 // CHECK4-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 1635 // CHECK4-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 1636 // CHECK4-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 1637 // CHECK4-NEXT: [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [1 x i8*], align 4 1638 // CHECK4-NEXT: [[DOTOFFLOAD_PTRS4:%.*]] = alloca [1 x i8*], align 4 1639 // CHECK4-NEXT: [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [1 x i8*], align 4 1640 // CHECK4-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 1641 // CHECK4-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 1642 // CHECK4-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 1643 // CHECK4-NEXT: store i32 1, i32* [[B]], align 4 1644 // CHECK4-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 1645 // CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[B]], align 4 1646 // CHECK4-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP0]], [[TMP1]] 1647 // CHECK4-NEXT: store i32 [[SUB]], i32* [[DOTCAPTURE_EXPR_]], align 4 1648 // CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[B]], align 4 1649 // CHECK4-NEXT: store i32 [[TMP2]], i32* [[B_CASTED]], align 4 1650 // CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[B_CASTED]], align 4 1651 // CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 1652 // CHECK4-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 1653 // CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* 
[[DOTCAPTURE_EXPR__CASTED]], align 4 1654 // CHECK4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0 1655 // CHECK4-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 1656 // CHECK4-NEXT: [[TMP7:%.*]] = bitcast i8** [[TMP6]] to %struct.S1** 1657 // CHECK4-NEXT: store %struct.S1* [[THIS1]], %struct.S1** [[TMP7]], align 4 1658 // CHECK4-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 1659 // CHECK4-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to double** 1660 // CHECK4-NEXT: store double* [[A]], double** [[TMP9]], align 4 1661 // CHECK4-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 1662 // CHECK4-NEXT: store i8* null, i8** [[TMP10]], align 4 1663 // CHECK4-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 1664 // CHECK4-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* 1665 // CHECK4-NEXT: store i32 [[TMP3]], i32* [[TMP12]], align 4 1666 // CHECK4-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 1667 // CHECK4-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* 1668 // CHECK4-NEXT: store i32 [[TMP3]], i32* [[TMP14]], align 4 1669 // CHECK4-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 1670 // CHECK4-NEXT: store i8* null, i8** [[TMP15]], align 4 1671 // CHECK4-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 1672 // CHECK4-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32* 1673 // CHECK4-NEXT: store i32 [[TMP5]], i32* [[TMP17]], align 4 1674 // CHECK4-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 1675 // CHECK4-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* 1676 // CHECK4-NEXT: 
store i32 [[TMP5]], i32* [[TMP19]], align 4 1677 // CHECK4-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 1678 // CHECK4-NEXT: store i8* null, i8** [[TMP20]], align 4 1679 // CHECK4-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 1680 // CHECK4-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 1681 // CHECK4-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 1682 // CHECK4-NEXT: [[TMP24:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l121.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 [[TMP23]]) 1683 // CHECK4-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0 1684 // CHECK4-NEXT: br i1 [[TMP25]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 1685 // CHECK4: omp_offload.failed: 1686 // CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l121(%struct.S1* [[THIS1]], i32 [[TMP3]], i32 [[TMP5]]) #[[ATTR2:[0-9]+]] 1687 // CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT]] 1688 // CHECK4: omp_offload.cont: 1689 // CHECK4-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 1690 // CHECK4-NEXT: [[TMP26:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0 1691 // CHECK4-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to %struct.S1** 1692 // CHECK4-NEXT: store %struct.S1* [[THIS1]], %struct.S1** [[TMP27]], align 4 1693 // CHECK4-NEXT: [[TMP28:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0 1694 // CHECK4-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to 
double** 1695 // CHECK4-NEXT: store double* [[A2]], double** [[TMP29]], align 4 1696 // CHECK4-NEXT: [[TMP30:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS5]], i32 0, i32 0 1697 // CHECK4-NEXT: store i8* null, i8** [[TMP30]], align 4 1698 // CHECK4-NEXT: [[TMP31:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0 1699 // CHECK4-NEXT: [[TMP32:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0 1700 // CHECK4-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l126.region_id, i32 1, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1024) 1701 // CHECK4-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 1702 // CHECK4-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED6:%.*]], label [[OMP_OFFLOAD_CONT7:%.*]] 1703 // CHECK4: omp_offload.failed6: 1704 // CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l126(%struct.S1* [[THIS1]]) #[[ATTR2]] 1705 // CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT7]] 1706 // CHECK4: omp_offload.cont7: 1707 // CHECK4-NEXT: [[A8:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 1708 // CHECK4-NEXT: [[TMP35:%.*]] = load double, double* [[A8]], align 4 1709 // CHECK4-NEXT: [[CONV:%.*]] = fptosi double [[TMP35]] to i32 1710 // CHECK4-NEXT: ret i32 [[CONV]] 1711 // 1712 // 1713 // CHECK4-LABEL: define {{[^@]+}}@_ZL7fstatici 1714 // CHECK4-SAME: (i32 [[N:%.*]]) #[[ATTR0]] { 1715 // CHECK4-NEXT: entry: 1716 // CHECK4-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 1717 // CHECK4-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 1718 // CHECK4-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 1719 // CHECK4-NEXT: 
[[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 1720 // CHECK4-NEXT: [[DOTCAPTURE_EXPR__CASTED2:%.*]] = alloca i32, align 4 1721 // CHECK4-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x i8*], align 4 1722 // CHECK4-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x i8*], align 4 1723 // CHECK4-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x i8*], align 4 1724 // CHECK4-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4 1725 // CHECK4-NEXT: [[DOTCAPTURE_EXPR__CASTED4:%.*]] = alloca i32, align 4 1726 // CHECK4-NEXT: [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [1 x i8*], align 4 1727 // CHECK4-NEXT: [[DOTOFFLOAD_PTRS6:%.*]] = alloca [1 x i8*], align 4 1728 // CHECK4-NEXT: [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [1 x i8*], align 4 1729 // CHECK4-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 1730 // CHECK4-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 1731 // CHECK4-NEXT: store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4 1732 // CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 1733 // CHECK4-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP1]], 32 1734 // CHECK4-NEXT: store i32 [[MUL]], i32* [[DOTCAPTURE_EXPR_1]], align 4 1735 // CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 1736 // CHECK4-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 1737 // CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 1738 // CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 1739 // CHECK4-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR__CASTED2]], align 4 1740 // CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED2]], align 4 1741 // CHECK4-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 1742 // CHECK4-NEXT: [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i32* 1743 // CHECK4-NEXT: store i32 [[TMP3]], i32* [[TMP7]], align 4 1744 // CHECK4-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* 
[[DOTOFFLOAD_PTRS]], i32 0, i32 0 1745 // CHECK4-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32* 1746 // CHECK4-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4 1747 // CHECK4-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 1748 // CHECK4-NEXT: store i8* null, i8** [[TMP10]], align 4 1749 // CHECK4-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 1750 // CHECK4-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* 1751 // CHECK4-NEXT: store i32 [[TMP5]], i32* [[TMP12]], align 4 1752 // CHECK4-NEXT: [[TMP13:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 1753 // CHECK4-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* 1754 // CHECK4-NEXT: store i32 [[TMP5]], i32* [[TMP14]], align 4 1755 // CHECK4-NEXT: [[TMP15:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 1756 // CHECK4-NEXT: store i8* null, i8** [[TMP15]], align 4 1757 // CHECK4-NEXT: [[TMP16:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 1758 // CHECK4-NEXT: [[TMP17:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 1759 // CHECK4-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 1760 // CHECK4-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 1761 // CHECK4-NEXT: [[TMP20:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l104.region_id, i32 2, i8** [[TMP16]], i8** [[TMP17]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 [[TMP18]], i32 [[TMP19]]) 1762 // CHECK4-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0 1763 // CHECK4-NEXT: br i1 [[TMP21]], label 
[[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 1764 // CHECK4: omp_offload.failed: 1765 // CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l104(i32 [[TMP3]], i32 [[TMP5]]) #[[ATTR2]] 1766 // CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT]] 1767 // CHECK4: omp_offload.cont: 1768 // CHECK4-NEXT: [[TMP22:%.*]] = load i32, i32* [[N_ADDR]], align 4 1769 // CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 32, [[TMP22]] 1770 // CHECK4-NEXT: store i32 [[ADD]], i32* [[DOTCAPTURE_EXPR_3]], align 4 1771 // CHECK4-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4 1772 // CHECK4-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR__CASTED4]], align 4 1773 // CHECK4-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED4]], align 4 1774 // CHECK4-NEXT: [[TMP25:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 1775 // CHECK4-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* 1776 // CHECK4-NEXT: store i32 [[TMP24]], i32* [[TMP26]], align 4 1777 // CHECK4-NEXT: [[TMP27:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 1778 // CHECK4-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32* 1779 // CHECK4-NEXT: store i32 [[TMP24]], i32* [[TMP28]], align 4 1780 // CHECK4-NEXT: [[TMP29:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i32 0, i32 0 1781 // CHECK4-NEXT: store i8* null, i8** [[TMP29]], align 4 1782 // CHECK4-NEXT: [[TMP30:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 1783 // CHECK4-NEXT: [[TMP31:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 1784 // CHECK4-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4 1785 // CHECK4-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l108.region_id, i32 1, i8** [[TMP30]], i8** 
[[TMP31]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 [[TMP32]]) 1786 // CHECK4-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 1787 // CHECK4-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED8:%.*]], label [[OMP_OFFLOAD_CONT9:%.*]] 1788 // CHECK4: omp_offload.failed8: 1789 // CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l108(i32 [[TMP24]]) #[[ATTR2]] 1790 // CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT9]] 1791 // CHECK4: omp_offload.cont9: 1792 // CHECK4-NEXT: [[TMP35:%.*]] = load i32, i32* [[N_ADDR]], align 4 1793 // CHECK4-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP35]], 1 1794 // CHECK4-NEXT: ret i32 [[ADD10]] 1795 // 1796 // 1797 // CHECK4-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i 1798 // CHECK4-SAME: (i32 [[N:%.*]]) #[[ATTR0]] comdat { 1799 // CHECK4-NEXT: entry: 1800 // CHECK4-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 1801 // CHECK4-NEXT: [[A:%.*]] = alloca i32, align 4 1802 // CHECK4-NEXT: [[B:%.*]] = alloca i16, align 2 1803 // CHECK4-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i16, align 2 1804 // CHECK4-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 1805 // CHECK4-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4 1806 // CHECK4-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 1807 // CHECK4-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 1808 // CHECK4-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 1809 // CHECK4-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 1810 // CHECK4-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 1811 // CHECK4-NEXT: store i32 0, i32* [[A]], align 4 1812 // CHECK4-NEXT: [[TMP0:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l88.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** 
null, i32 0, i32 20) 1813 // CHECK4-NEXT: [[TMP1:%.*]] = icmp ne i32 [[TMP0]], 0 1814 // CHECK4-NEXT: br i1 [[TMP1]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 1815 // CHECK4: omp_offload.failed: 1816 // CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l88() #[[ATTR2]] 1817 // CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT]] 1818 // CHECK4: omp_offload.cont: 1819 // CHECK4-NEXT: store i16 1, i16* [[B]], align 2 1820 // CHECK4-NEXT: [[TMP2:%.*]] = load i16, i16* [[B]], align 2 1821 // CHECK4-NEXT: store i16 [[TMP2]], i16* [[DOTCAPTURE_EXPR_]], align 2 1822 // CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 1823 // CHECK4-NEXT: store i32 [[TMP3]], i32* [[A_CASTED]], align 4 1824 // CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[A_CASTED]], align 4 1825 // CHECK4-NEXT: [[TMP5:%.*]] = load i16, i16* [[B]], align 2 1826 // CHECK4-NEXT: [[CONV:%.*]] = bitcast i32* [[B_CASTED]] to i16* 1827 // CHECK4-NEXT: store i16 [[TMP5]], i16* [[CONV]], align 2 1828 // CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[B_CASTED]], align 4 1829 // CHECK4-NEXT: [[TMP7:%.*]] = load i16, i16* [[DOTCAPTURE_EXPR_]], align 2 1830 // CHECK4-NEXT: [[CONV1:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__CASTED]] to i16* 1831 // CHECK4-NEXT: store i16 [[TMP7]], i16* [[CONV1]], align 2 1832 // CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 1833 // CHECK4-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 1834 // CHECK4-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i32* 1835 // CHECK4-NEXT: store i32 [[TMP4]], i32* [[TMP10]], align 4 1836 // CHECK4-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 1837 // CHECK4-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* 1838 // CHECK4-NEXT: store i32 [[TMP4]], i32* [[TMP12]], align 4 1839 // CHECK4-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* 
[[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 1840 // CHECK4-NEXT: store i8* null, i8** [[TMP13]], align 4 1841 // CHECK4-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 1842 // CHECK4-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* 1843 // CHECK4-NEXT: store i32 [[TMP6]], i32* [[TMP15]], align 4 1844 // CHECK4-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 1845 // CHECK4-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32* 1846 // CHECK4-NEXT: store i32 [[TMP6]], i32* [[TMP17]], align 4 1847 // CHECK4-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 1848 // CHECK4-NEXT: store i8* null, i8** [[TMP18]], align 4 1849 // CHECK4-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 1850 // CHECK4-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32* 1851 // CHECK4-NEXT: store i32 [[TMP8]], i32* [[TMP20]], align 4 1852 // CHECK4-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 1853 // CHECK4-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i32* 1854 // CHECK4-NEXT: store i32 [[TMP8]], i32* [[TMP22]], align 4 1855 // CHECK4-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 1856 // CHECK4-NEXT: store i8* null, i8** [[TMP23]], align 4 1857 // CHECK4-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 1858 // CHECK4-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 1859 // CHECK4-NEXT: [[TMP26:%.*]] = load i16, i16* [[DOTCAPTURE_EXPR_]], align 2 1860 // CHECK4-NEXT: [[TMP27:%.*]] = sext i16 [[TMP26]] to i32 1861 // CHECK4-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* 
@.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l93.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 [[TMP27]], i32 1024) 1862 // CHECK4-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 1863 // CHECK4-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED2:%.*]], label [[OMP_OFFLOAD_CONT3:%.*]] 1864 // CHECK4: omp_offload.failed2: 1865 // CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l93(i32 [[TMP4]], i32 [[TMP6]], i32 [[TMP8]]) #[[ATTR2]] 1866 // CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT3]] 1867 // CHECK4: omp_offload.cont3: 1868 // CHECK4-NEXT: [[TMP30:%.*]] = load i32, i32* [[A]], align 4 1869 // CHECK4-NEXT: ret i32 [[TMP30]] 1870 // 1871 // 1872 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l121 1873 // CHECK4-SAME: (%struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1:[0-9]+]] { 1874 // CHECK4-NEXT: entry: 1875 // CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 1876 // CHECK4-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 1877 // CHECK4-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 1878 // CHECK4-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4 1879 // CHECK4-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 1880 // CHECK4-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 1881 // CHECK4-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 1882 // CHECK4-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 1883 // CHECK4-NEXT: [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 1884 // CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 1885 // CHECK4-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], 
i32 [[TMP0]], i32 0, i32 [[TMP2]]) 1886 // CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[B_ADDR]], align 4 1887 // CHECK4-NEXT: store i32 [[TMP3]], i32* [[B_CASTED]], align 4 1888 // CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_CASTED]], align 4 1889 // CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]], i32 [[TMP4]]) 1890 // CHECK4-NEXT: ret void 1891 // 1892 // 1893 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined. 1894 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i32 [[B:%.*]]) #[[ATTR1]] { 1895 // CHECK4-NEXT: entry: 1896 // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 1897 // CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 1898 // CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 1899 // CHECK4-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 1900 // CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 1901 // CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 1902 // CHECK4-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 1903 // CHECK4-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 1904 // CHECK4-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 1905 // CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[B_ADDR]], align 4 1906 // CHECK4-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP1]] to double 1907 // CHECK4-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 1908 // CHECK4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0 1909 // CHECK4-NEXT: store double [[ADD]], double* [[A]], align 4 1910 // CHECK4-NEXT: ret void 1911 // 1912 // 1913 // CHECK4-LABEL: define 
{{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l126 1914 // CHECK4-SAME: (%struct.S1* [[THIS:%.*]]) #[[ATTR1]] { 1915 // CHECK4-NEXT: entry: 1916 // CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 1917 // CHECK4-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 1918 // CHECK4-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 1919 // CHECK4-NEXT: [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 1920 // CHECK4-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 1024) 1921 // CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]]) 1922 // CHECK4-NEXT: ret void 1923 // 1924 // 1925 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..1 1926 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]]) #[[ATTR1]] { 1927 // CHECK4-NEXT: entry: 1928 // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 1929 // CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 1930 // CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 1931 // CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 1932 // CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 1933 // CHECK4-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 1934 // CHECK4-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 1935 // CHECK4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0 1936 // CHECK4-NEXT: store double 2.500000e+00, double* [[A]], align 4 1937 // CHECK4-NEXT: ret void 1938 // 1939 // 1940 // CHECK4-LABEL: define 
{{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l104 1941 // CHECK4-SAME: (i32 [[DOTCAPTURE_EXPR_:%.*]], i32 [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR1]] { 1942 // CHECK4-NEXT: entry: 1943 // CHECK4-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 1944 // CHECK4-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca i32, align 4 1945 // CHECK4-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 1946 // CHECK4-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 1947 // CHECK4-NEXT: store i32 [[DOTCAPTURE_EXPR_1]], i32* [[DOTCAPTURE_EXPR__ADDR2]], align 4 1948 // CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 1949 // CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR2]], align 4 1950 // CHECK4-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]]) 1951 // CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*)) 1952 // CHECK4-NEXT: ret void 1953 // 1954 // 1955 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..4 1956 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 1957 // CHECK4-NEXT: entry: 1958 // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 1959 // CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 1960 // CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 1961 // CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 1962 // CHECK4-NEXT: ret void 1963 // 1964 // 1965 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l108 1966 // CHECK4-SAME: (i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] { 1967 // CHECK4-NEXT: entry: 1968 // CHECK4-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 1969 // CHECK4-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 1970 // CHECK4-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 1971 // CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 1972 // CHECK4-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 [[TMP1]]) 1973 // CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..7 to void (i32*, i32*, ...)*)) 1974 // CHECK4-NEXT: ret void 1975 // 1976 // 1977 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..7 1978 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 1979 // CHECK4-NEXT: entry: 1980 // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 1981 // CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 1982 // CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 1983 // CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 1984 // CHECK4-NEXT: ret void 1985 // 1986 // 1987 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l88 1988 // CHECK4-SAME: () #[[ATTR1]] { 1989 // CHECK4-NEXT: entry: 1990 // CHECK4-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 1991 // CHECK4-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 20) 1992 // CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*)) 1993 // CHECK4-NEXT: ret void 1994 // 1995 // 1996 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..10 1997 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 1998 // CHECK4-NEXT: entry: 1999 // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 2000 // CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 2001 // CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 2002 // CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 2003 // CHECK4-NEXT: ret void 2004 // 2005 // 2006 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l93 2007 // CHECK4-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] { 2008 // CHECK4-NEXT: entry: 2009 // CHECK4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 2010 // CHECK4-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 2011 // CHECK4-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 2012 // CHECK4-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 2013 // CHECK4-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4 2014 // CHECK4-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 2015 // CHECK4-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 2016 // CHECK4-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 2017 // CHECK4-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 2018 // CHECK4-NEXT: [[CONV:%.*]] = bitcast i32* [[B_ADDR]] to i16* 2019 // CHECK4-NEXT: [[CONV1:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__ADDR]] to i16* 2020 // CHECK4-NEXT: [[TMP1:%.*]] = load i16, i16* [[CONV1]], align 4 2021 // CHECK4-NEXT: [[TMP2:%.*]] = sext i16 [[TMP1]] to i32 2022 // CHECK4-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], 
i32 [[TMP2]], i32 1024) 2023 // CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[A_ADDR]], align 4 2024 // CHECK4-NEXT: store i32 [[TMP3]], i32* [[A_CASTED]], align 4 2025 // CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[A_CASTED]], align 4 2026 // CHECK4-NEXT: [[TMP5:%.*]] = load i16, i16* [[CONV]], align 4 2027 // CHECK4-NEXT: [[CONV2:%.*]] = bitcast i32* [[B_CASTED]] to i16* 2028 // CHECK4-NEXT: store i16 [[TMP5]], i16* [[CONV2]], align 2 2029 // CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[B_CASTED]], align 4 2030 // CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP4]], i32 [[TMP6]]) 2031 // CHECK4-NEXT: ret void 2032 // 2033 // 2034 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..11 2035 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[B:%.*]]) #[[ATTR1]] { 2036 // CHECK4-NEXT: entry: 2037 // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 2038 // CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 2039 // CHECK4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 2040 // CHECK4-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 2041 // CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 2042 // CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 2043 // CHECK4-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 2044 // CHECK4-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 2045 // CHECK4-NEXT: [[CONV:%.*]] = bitcast i32* [[B_ADDR]] to i16* 2046 // CHECK4-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 4 2047 // CHECK4-NEXT: [[CONV1:%.*]] = sext i16 [[TMP0]] to i32 2048 // CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4 2049 // CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CONV1]] 2050 // CHECK4-NEXT: store i32 
[[ADD]], i32* [[A_ADDR]], align 4 2051 // CHECK4-NEXT: ret void 2052 // 2053 // 2054 // CHECK4-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg 2055 // CHECK4-SAME: () #[[ATTR3:[0-9]+]] { 2056 // CHECK4-NEXT: entry: 2057 // CHECK4-NEXT: call void @__tgt_register_requires(i64 1) 2058 // CHECK4-NEXT: ret void 2059 // 2060 // 2061 // CHECK5-LABEL: define {{[^@]+}}@_Z3bari 2062 // CHECK5-SAME: (i32 signext [[N:%.*]]) #[[ATTR0:[0-9]+]] { 2063 // CHECK5-NEXT: entry: 2064 // CHECK5-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 2065 // CHECK5-NEXT: [[A:%.*]] = alloca i32, align 4 2066 // CHECK5-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8 2067 // CHECK5-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 2068 // CHECK5-NEXT: store i32 0, i32* [[A]], align 4 2069 // CHECK5-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 2070 // CHECK5-NEXT: [[CALL:%.*]] = call signext i32 @_ZN2S12r1Ei(%struct.S1* nonnull dereferenceable(8) [[S]], i32 signext [[TMP0]]) 2071 // CHECK5-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4 2072 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] 2073 // CHECK5-NEXT: store i32 [[ADD]], i32* [[A]], align 4 2074 // CHECK5-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 2075 // CHECK5-NEXT: [[CALL1:%.*]] = call signext i32 @_ZL7fstatici(i32 signext [[TMP2]]) 2076 // CHECK5-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 2077 // CHECK5-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] 2078 // CHECK5-NEXT: store i32 [[ADD2]], i32* [[A]], align 4 2079 // CHECK5-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 2080 // CHECK5-NEXT: [[CALL3:%.*]] = call signext i32 @_Z9ftemplateIiET_i(i32 signext [[TMP4]]) 2081 // CHECK5-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4 2082 // CHECK5-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] 2083 // CHECK5-NEXT: store i32 [[ADD4]], i32* [[A]], align 4 2084 // CHECK5-NEXT: [[TMP6:%.*]] = load i32, i32* [[A]], align 4 2085 // CHECK5-NEXT: ret i32 [[TMP6]] 2086 // 2087 // 
2088 // CHECK5-LABEL: define {{[^@]+}}@_ZN2S12r1Ei 2089 // CHECK5-SAME: (%struct.S1* nonnull dereferenceable(8) [[THIS:%.*]], i32 signext [[N:%.*]]) #[[ATTR0]] comdat align 2 { 2090 // CHECK5-NEXT: entry: 2091 // CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 2092 // CHECK5-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 2093 // CHECK5-NEXT: [[B:%.*]] = alloca i32, align 4 2094 // CHECK5-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 2095 // CHECK5-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 2096 // CHECK5-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 2097 // CHECK5-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 2098 // CHECK5-NEXT: store i32 1, i32* [[B]], align 4 2099 // CHECK5-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 2100 // CHECK5-NEXT: [[TMP1:%.*]] = load i32, i32* [[B]], align 4 2101 // CHECK5-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP0]], [[TMP1]] 2102 // CHECK5-NEXT: store i32 [[SUB]], i32* [[DOTCAPTURE_EXPR_]], align 4 2103 // CHECK5-NEXT: [[TMP2:%.*]] = load i32, i32* [[B]], align 4 2104 // CHECK5-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP2]] to double 2105 // CHECK5-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 2106 // CHECK5-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0 2107 // CHECK5-NEXT: store double [[ADD]], double* [[A]], align 8 2108 // CHECK5-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 2109 // CHECK5-NEXT: store double 2.500000e+00, double* [[A2]], align 8 2110 // CHECK5-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 2111 // CHECK5-NEXT: [[TMP3:%.*]] = load double, double* [[A3]], align 8 2112 // CHECK5-NEXT: [[CONV4:%.*]] = fptosi double [[TMP3]] to i32 2113 // CHECK5-NEXT: ret i32 [[CONV4]] 2114 // 2115 // 2116 // CHECK5-LABEL: define {{[^@]+}}@_ZL7fstatici 2117 // CHECK5-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] { 
2118 // CHECK5-NEXT: entry: 2119 // CHECK5-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 2120 // CHECK5-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 2121 // CHECK5-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 2122 // CHECK5-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4 2123 // CHECK5-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 2124 // CHECK5-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 2125 // CHECK5-NEXT: store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4 2126 // CHECK5-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 2127 // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP1]], 32 2128 // CHECK5-NEXT: store i32 [[MUL]], i32* [[DOTCAPTURE_EXPR_1]], align 4 2129 // CHECK5-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 2130 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 32, [[TMP2]] 2131 // CHECK5-NEXT: store i32 [[ADD]], i32* [[DOTCAPTURE_EXPR_2]], align 4 2132 // CHECK5-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4 2133 // CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP3]], 1 2134 // CHECK5-NEXT: ret i32 [[ADD3]] 2135 // 2136 // 2137 // CHECK5-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i 2138 // CHECK5-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] comdat { 2139 // CHECK5-NEXT: entry: 2140 // CHECK5-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 2141 // CHECK5-NEXT: [[A:%.*]] = alloca i32, align 4 2142 // CHECK5-NEXT: [[B:%.*]] = alloca i16, align 2 2143 // CHECK5-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i16, align 2 2144 // CHECK5-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 2145 // CHECK5-NEXT: store i32 0, i32* [[A]], align 4 2146 // CHECK5-NEXT: store i16 1, i16* [[B]], align 2 2147 // CHECK5-NEXT: [[TMP0:%.*]] = load i16, i16* [[B]], align 2 2148 // CHECK5-NEXT: store i16 [[TMP0]], i16* [[DOTCAPTURE_EXPR_]], align 2 2149 // CHECK5-NEXT: [[TMP1:%.*]] = load i16, i16* [[B]], align 2 2150 // CHECK5-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32 2151 // CHECK5-NEXT: [[TMP2:%.*]] = load i32, i32* [[A]], align 4 2152 // 
CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], [[CONV]] 2153 // CHECK5-NEXT: store i32 [[ADD]], i32* [[A]], align 4 2154 // CHECK5-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 2155 // CHECK5-NEXT: ret i32 [[TMP3]] 2156 // 2157 // 2158 // CHECK6-LABEL: define {{[^@]+}}@_Z3bari 2159 // CHECK6-SAME: (i32 signext [[N:%.*]]) #[[ATTR0:[0-9]+]] { 2160 // CHECK6-NEXT: entry: 2161 // CHECK6-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 2162 // CHECK6-NEXT: [[A:%.*]] = alloca i32, align 4 2163 // CHECK6-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8 2164 // CHECK6-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 2165 // CHECK6-NEXT: store i32 0, i32* [[A]], align 4 2166 // CHECK6-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 2167 // CHECK6-NEXT: [[CALL:%.*]] = call signext i32 @_ZN2S12r1Ei(%struct.S1* nonnull dereferenceable(8) [[S]], i32 signext [[TMP0]]) 2168 // CHECK6-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4 2169 // CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] 2170 // CHECK6-NEXT: store i32 [[ADD]], i32* [[A]], align 4 2171 // CHECK6-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 2172 // CHECK6-NEXT: [[CALL1:%.*]] = call signext i32 @_ZL7fstatici(i32 signext [[TMP2]]) 2173 // CHECK6-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 2174 // CHECK6-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] 2175 // CHECK6-NEXT: store i32 [[ADD2]], i32* [[A]], align 4 2176 // CHECK6-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 2177 // CHECK6-NEXT: [[CALL3:%.*]] = call signext i32 @_Z9ftemplateIiET_i(i32 signext [[TMP4]]) 2178 // CHECK6-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4 2179 // CHECK6-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] 2180 // CHECK6-NEXT: store i32 [[ADD4]], i32* [[A]], align 4 2181 // CHECK6-NEXT: [[TMP6:%.*]] = load i32, i32* [[A]], align 4 2182 // CHECK6-NEXT: ret i32 [[TMP6]] 2183 // 2184 // 2185 // CHECK6-LABEL: define {{[^@]+}}@_ZN2S12r1Ei 2186 // CHECK6-SAME: (%struct.S1* 
nonnull dereferenceable(8) [[THIS:%.*]], i32 signext [[N:%.*]]) #[[ATTR0]] comdat align 2 { 2187 // CHECK6-NEXT: entry: 2188 // CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 2189 // CHECK6-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 2190 // CHECK6-NEXT: [[B:%.*]] = alloca i32, align 4 2191 // CHECK6-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 2192 // CHECK6-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 2193 // CHECK6-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 2194 // CHECK6-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 2195 // CHECK6-NEXT: store i32 1, i32* [[B]], align 4 2196 // CHECK6-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 2197 // CHECK6-NEXT: [[TMP1:%.*]] = load i32, i32* [[B]], align 4 2198 // CHECK6-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP0]], [[TMP1]] 2199 // CHECK6-NEXT: store i32 [[SUB]], i32* [[DOTCAPTURE_EXPR_]], align 4 2200 // CHECK6-NEXT: [[TMP2:%.*]] = load i32, i32* [[B]], align 4 2201 // CHECK6-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP2]] to double 2202 // CHECK6-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 2203 // CHECK6-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0 2204 // CHECK6-NEXT: store double [[ADD]], double* [[A]], align 8 2205 // CHECK6-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 2206 // CHECK6-NEXT: store double 2.500000e+00, double* [[A2]], align 8 2207 // CHECK6-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 2208 // CHECK6-NEXT: [[TMP3:%.*]] = load double, double* [[A3]], align 8 2209 // CHECK6-NEXT: [[CONV4:%.*]] = fptosi double [[TMP3]] to i32 2210 // CHECK6-NEXT: ret i32 [[CONV4]] 2211 // 2212 // 2213 // CHECK6-LABEL: define {{[^@]+}}@_ZL7fstatici 2214 // CHECK6-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] { 2215 // CHECK6-NEXT: entry: 2216 // CHECK6-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 
2217 // CHECK6-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 2218 // CHECK6-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 2219 // CHECK6-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4 2220 // CHECK6-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 2221 // CHECK6-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 2222 // CHECK6-NEXT: store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4 2223 // CHECK6-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 2224 // CHECK6-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP1]], 32 2225 // CHECK6-NEXT: store i32 [[MUL]], i32* [[DOTCAPTURE_EXPR_1]], align 4 2226 // CHECK6-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 2227 // CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 32, [[TMP2]] 2228 // CHECK6-NEXT: store i32 [[ADD]], i32* [[DOTCAPTURE_EXPR_2]], align 4 2229 // CHECK6-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4 2230 // CHECK6-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP3]], 1 2231 // CHECK6-NEXT: ret i32 [[ADD3]] 2232 // 2233 // 2234 // CHECK6-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i 2235 // CHECK6-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] comdat { 2236 // CHECK6-NEXT: entry: 2237 // CHECK6-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 2238 // CHECK6-NEXT: [[A:%.*]] = alloca i32, align 4 2239 // CHECK6-NEXT: [[B:%.*]] = alloca i16, align 2 2240 // CHECK6-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i16, align 2 2241 // CHECK6-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 2242 // CHECK6-NEXT: store i32 0, i32* [[A]], align 4 2243 // CHECK6-NEXT: store i16 1, i16* [[B]], align 2 2244 // CHECK6-NEXT: [[TMP0:%.*]] = load i16, i16* [[B]], align 2 2245 // CHECK6-NEXT: store i16 [[TMP0]], i16* [[DOTCAPTURE_EXPR_]], align 2 2246 // CHECK6-NEXT: [[TMP1:%.*]] = load i16, i16* [[B]], align 2 2247 // CHECK6-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32 2248 // CHECK6-NEXT: [[TMP2:%.*]] = load i32, i32* [[A]], align 4 2249 // CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], [[CONV]] 2250 // CHECK6-NEXT: store 
i32 [[ADD]], i32* [[A]], align 4 2251 // CHECK6-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 2252 // CHECK6-NEXT: ret i32 [[TMP3]] 2253 // 2254 // 2255 // CHECK7-LABEL: define {{[^@]+}}@_Z3bari 2256 // CHECK7-SAME: (i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] { 2257 // CHECK7-NEXT: entry: 2258 // CHECK7-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 2259 // CHECK7-NEXT: [[A:%.*]] = alloca i32, align 4 2260 // CHECK7-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4 2261 // CHECK7-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 2262 // CHECK7-NEXT: store i32 0, i32* [[A]], align 4 2263 // CHECK7-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 2264 // CHECK7-NEXT: [[CALL:%.*]] = call i32 @_ZN2S12r1Ei(%struct.S1* nonnull dereferenceable(8) [[S]], i32 [[TMP0]]) 2265 // CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4 2266 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] 2267 // CHECK7-NEXT: store i32 [[ADD]], i32* [[A]], align 4 2268 // CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 2269 // CHECK7-NEXT: [[CALL1:%.*]] = call i32 @_ZL7fstatici(i32 [[TMP2]]) 2270 // CHECK7-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 2271 // CHECK7-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] 2272 // CHECK7-NEXT: store i32 [[ADD2]], i32* [[A]], align 4 2273 // CHECK7-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 2274 // CHECK7-NEXT: [[CALL3:%.*]] = call i32 @_Z9ftemplateIiET_i(i32 [[TMP4]]) 2275 // CHECK7-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4 2276 // CHECK7-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] 2277 // CHECK7-NEXT: store i32 [[ADD4]], i32* [[A]], align 4 2278 // CHECK7-NEXT: [[TMP6:%.*]] = load i32, i32* [[A]], align 4 2279 // CHECK7-NEXT: ret i32 [[TMP6]] 2280 // 2281 // 2282 // CHECK7-LABEL: define {{[^@]+}}@_ZN2S12r1Ei 2283 // CHECK7-SAME: (%struct.S1* nonnull dereferenceable(8) [[THIS:%.*]], i32 [[N:%.*]]) #[[ATTR0]] comdat align 2 { 2284 // CHECK7-NEXT: entry: 2285 // CHECK7-NEXT: 
[[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 2286 // CHECK7-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 2287 // CHECK7-NEXT: [[B:%.*]] = alloca i32, align 4 2288 // CHECK7-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 2289 // CHECK7-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 2290 // CHECK7-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 2291 // CHECK7-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 2292 // CHECK7-NEXT: store i32 1, i32* [[B]], align 4 2293 // CHECK7-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 2294 // CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[B]], align 4 2295 // CHECK7-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP0]], [[TMP1]] 2296 // CHECK7-NEXT: store i32 [[SUB]], i32* [[DOTCAPTURE_EXPR_]], align 4 2297 // CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[B]], align 4 2298 // CHECK7-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP2]] to double 2299 // CHECK7-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 2300 // CHECK7-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0 2301 // CHECK7-NEXT: store double [[ADD]], double* [[A]], align 4 2302 // CHECK7-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 2303 // CHECK7-NEXT: store double 2.500000e+00, double* [[A2]], align 4 2304 // CHECK7-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 2305 // CHECK7-NEXT: [[TMP3:%.*]] = load double, double* [[A3]], align 4 2306 // CHECK7-NEXT: [[CONV4:%.*]] = fptosi double [[TMP3]] to i32 2307 // CHECK7-NEXT: ret i32 [[CONV4]] 2308 // 2309 // 2310 // CHECK7-LABEL: define {{[^@]+}}@_ZL7fstatici 2311 // CHECK7-SAME: (i32 [[N:%.*]]) #[[ATTR0]] { 2312 // CHECK7-NEXT: entry: 2313 // CHECK7-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 2314 // CHECK7-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 2315 // CHECK7-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 2316 // 
CHECK7-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4 2317 // CHECK7-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 2318 // CHECK7-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 2319 // CHECK7-NEXT: store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4 2320 // CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 2321 // CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP1]], 32 2322 // CHECK7-NEXT: store i32 [[MUL]], i32* [[DOTCAPTURE_EXPR_1]], align 4 2323 // CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 2324 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 32, [[TMP2]] 2325 // CHECK7-NEXT: store i32 [[ADD]], i32* [[DOTCAPTURE_EXPR_2]], align 4 2326 // CHECK7-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4 2327 // CHECK7-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP3]], 1 2328 // CHECK7-NEXT: ret i32 [[ADD3]] 2329 // 2330 // 2331 // CHECK7-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i 2332 // CHECK7-SAME: (i32 [[N:%.*]]) #[[ATTR0]] comdat { 2333 // CHECK7-NEXT: entry: 2334 // CHECK7-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 2335 // CHECK7-NEXT: [[A:%.*]] = alloca i32, align 4 2336 // CHECK7-NEXT: [[B:%.*]] = alloca i16, align 2 2337 // CHECK7-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i16, align 2 2338 // CHECK7-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 2339 // CHECK7-NEXT: store i32 0, i32* [[A]], align 4 2340 // CHECK7-NEXT: store i16 1, i16* [[B]], align 2 2341 // CHECK7-NEXT: [[TMP0:%.*]] = load i16, i16* [[B]], align 2 2342 // CHECK7-NEXT: store i16 [[TMP0]], i16* [[DOTCAPTURE_EXPR_]], align 2 2343 // CHECK7-NEXT: [[TMP1:%.*]] = load i16, i16* [[B]], align 2 2344 // CHECK7-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32 2345 // CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[A]], align 4 2346 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], [[CONV]] 2347 // CHECK7-NEXT: store i32 [[ADD]], i32* [[A]], align 4 2348 // CHECK7-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 2349 // CHECK7-NEXT: ret i32 [[TMP3]] 2350 // 2351 // 
2352 // CHECK8-LABEL: define {{[^@]+}}@_Z3bari 2353 // CHECK8-SAME: (i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] { 2354 // CHECK8-NEXT: entry: 2355 // CHECK8-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 2356 // CHECK8-NEXT: [[A:%.*]] = alloca i32, align 4 2357 // CHECK8-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4 2358 // CHECK8-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 2359 // CHECK8-NEXT: store i32 0, i32* [[A]], align 4 2360 // CHECK8-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 2361 // CHECK8-NEXT: [[CALL:%.*]] = call i32 @_ZN2S12r1Ei(%struct.S1* nonnull dereferenceable(8) [[S]], i32 [[TMP0]]) 2362 // CHECK8-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4 2363 // CHECK8-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] 2364 // CHECK8-NEXT: store i32 [[ADD]], i32* [[A]], align 4 2365 // CHECK8-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 2366 // CHECK8-NEXT: [[CALL1:%.*]] = call i32 @_ZL7fstatici(i32 [[TMP2]]) 2367 // CHECK8-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 2368 // CHECK8-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] 2369 // CHECK8-NEXT: store i32 [[ADD2]], i32* [[A]], align 4 2370 // CHECK8-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 2371 // CHECK8-NEXT: [[CALL3:%.*]] = call i32 @_Z9ftemplateIiET_i(i32 [[TMP4]]) 2372 // CHECK8-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4 2373 // CHECK8-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] 2374 // CHECK8-NEXT: store i32 [[ADD4]], i32* [[A]], align 4 2375 // CHECK8-NEXT: [[TMP6:%.*]] = load i32, i32* [[A]], align 4 2376 // CHECK8-NEXT: ret i32 [[TMP6]] 2377 // 2378 // 2379 // CHECK8-LABEL: define {{[^@]+}}@_ZN2S12r1Ei 2380 // CHECK8-SAME: (%struct.S1* nonnull dereferenceable(8) [[THIS:%.*]], i32 [[N:%.*]]) #[[ATTR0]] comdat align 2 { 2381 // CHECK8-NEXT: entry: 2382 // CHECK8-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 2383 // CHECK8-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 2384 // CHECK8-NEXT: [[B:%.*]] = alloca i32, align 4 2385 // 
CHECK8-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 2386 // CHECK8-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 2387 // CHECK8-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 2388 // CHECK8-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 2389 // CHECK8-NEXT: store i32 1, i32* [[B]], align 4 2390 // CHECK8-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 2391 // CHECK8-NEXT: [[TMP1:%.*]] = load i32, i32* [[B]], align 4 2392 // CHECK8-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP0]], [[TMP1]] 2393 // CHECK8-NEXT: store i32 [[SUB]], i32* [[DOTCAPTURE_EXPR_]], align 4 2394 // CHECK8-NEXT: [[TMP2:%.*]] = load i32, i32* [[B]], align 4 2395 // CHECK8-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP2]] to double 2396 // CHECK8-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 2397 // CHECK8-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0 2398 // CHECK8-NEXT: store double [[ADD]], double* [[A]], align 4 2399 // CHECK8-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 2400 // CHECK8-NEXT: store double 2.500000e+00, double* [[A2]], align 4 2401 // CHECK8-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 2402 // CHECK8-NEXT: [[TMP3:%.*]] = load double, double* [[A3]], align 4 2403 // CHECK8-NEXT: [[CONV4:%.*]] = fptosi double [[TMP3]] to i32 2404 // CHECK8-NEXT: ret i32 [[CONV4]] 2405 // 2406 // 2407 // CHECK8-LABEL: define {{[^@]+}}@_ZL7fstatici 2408 // CHECK8-SAME: (i32 [[N:%.*]]) #[[ATTR0]] { 2409 // CHECK8-NEXT: entry: 2410 // CHECK8-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 2411 // CHECK8-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 2412 // CHECK8-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 2413 // CHECK8-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4 2414 // CHECK8-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 2415 // CHECK8-NEXT: [[TMP0:%.*]] = load i32, 
i32* [[N_ADDR]], align 4 2416 // CHECK8-NEXT: store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4 2417 // CHECK8-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 2418 // CHECK8-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP1]], 32 2419 // CHECK8-NEXT: store i32 [[MUL]], i32* [[DOTCAPTURE_EXPR_1]], align 4 2420 // CHECK8-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 2421 // CHECK8-NEXT: [[ADD:%.*]] = add nsw i32 32, [[TMP2]] 2422 // CHECK8-NEXT: store i32 [[ADD]], i32* [[DOTCAPTURE_EXPR_2]], align 4 2423 // CHECK8-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4 2424 // CHECK8-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP3]], 1 2425 // CHECK8-NEXT: ret i32 [[ADD3]] 2426 // 2427 // 2428 // CHECK8-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i 2429 // CHECK8-SAME: (i32 [[N:%.*]]) #[[ATTR0]] comdat { 2430 // CHECK8-NEXT: entry: 2431 // CHECK8-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 2432 // CHECK8-NEXT: [[A:%.*]] = alloca i32, align 4 2433 // CHECK8-NEXT: [[B:%.*]] = alloca i16, align 2 2434 // CHECK8-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i16, align 2 2435 // CHECK8-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 2436 // CHECK8-NEXT: store i32 0, i32* [[A]], align 4 2437 // CHECK8-NEXT: store i16 1, i16* [[B]], align 2 2438 // CHECK8-NEXT: [[TMP0:%.*]] = load i16, i16* [[B]], align 2 2439 // CHECK8-NEXT: store i16 [[TMP0]], i16* [[DOTCAPTURE_EXPR_]], align 2 2440 // CHECK8-NEXT: [[TMP1:%.*]] = load i16, i16* [[B]], align 2 2441 // CHECK8-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32 2442 // CHECK8-NEXT: [[TMP2:%.*]] = load i32, i32* [[A]], align 4 2443 // CHECK8-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], [[CONV]] 2444 // CHECK8-NEXT: store i32 [[ADD]], i32* [[A]], align 4 2445 // CHECK8-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 2446 // CHECK8-NEXT: ret i32 [[TMP3]] 2447 // 2448 // 2449 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l104 2450 // CHECK9-SAME: (i64 [[DOTCAPTURE_EXPR_:%.*]], i64 
[[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR0:[0-9]+]] { 2451 // CHECK9-NEXT: entry: 2452 // CHECK9-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 2453 // CHECK9-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca i64, align 8 2454 // CHECK9-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]]) 2455 // CHECK9-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 2456 // CHECK9-NEXT: store i64 [[DOTCAPTURE_EXPR_1]], i64* [[DOTCAPTURE_EXPR__ADDR2]], align 8 2457 // CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32* 2458 // CHECK9-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR2]] to i32* 2459 // CHECK9-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8 2460 // CHECK9-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV3]], align 8 2461 // CHECK9-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]]) 2462 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*)) 2463 // CHECK9-NEXT: ret void 2464 // 2465 // 2466 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined. 
2467 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] { 2468 // CHECK9-NEXT: entry: 2469 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 2470 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 2471 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 2472 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 2473 // CHECK9-NEXT: ret void 2474 // 2475 // 2476 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l108 2477 // CHECK9-SAME: (i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] { 2478 // CHECK9-NEXT: entry: 2479 // CHECK9-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 2480 // CHECK9-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 2481 // CHECK9-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 2482 // CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32* 2483 // CHECK9-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8 2484 // CHECK9-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 [[TMP1]]) 2485 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*)) 2486 // CHECK9-NEXT: ret void 2487 // 2488 // 2489 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..1 2490 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] { 2491 // CHECK9-NEXT: entry: 2492 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 2493 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 2494 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 2495 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 2496 // CHECK9-NEXT: ret void 2497 // 2498 // 2499 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l121 2500 // CHECK9-SAME: (%struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] { 2501 // CHECK9-NEXT: entry: 2502 // CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 2503 // CHECK9-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8 2504 // CHECK9-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 2505 // CHECK9-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8 2506 // CHECK9-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 2507 // CHECK9-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 2508 // CHECK9-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8 2509 // CHECK9-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 2510 // CHECK9-NEXT: [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 2511 // CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32* 2512 // CHECK9-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32* 2513 // CHECK9-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV1]], align 8 2514 // CHECK9-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 
[[TMP0]], i32 0, i32 [[TMP2]]) 2515 // CHECK9-NEXT: [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8 2516 // CHECK9-NEXT: [[CONV2:%.*]] = bitcast i64* [[B_CASTED]] to i32* 2517 // CHECK9-NEXT: store i32 [[TMP3]], i32* [[CONV2]], align 4 2518 // CHECK9-NEXT: [[TMP4:%.*]] = load i64, i64* [[B_CASTED]], align 8 2519 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64)* @.omp_outlined..2 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]], i64 [[TMP4]]) 2520 // CHECK9-NEXT: ret void 2521 // 2522 // 2523 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..2 2524 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i64 [[B:%.*]]) #[[ATTR0]] { 2525 // CHECK9-NEXT: entry: 2526 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 2527 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 2528 // CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 2529 // CHECK9-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8 2530 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 2531 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 2532 // CHECK9-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 2533 // CHECK9-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8 2534 // CHECK9-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 2535 // CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32* 2536 // CHECK9-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8 2537 // CHECK9-NEXT: [[CONV1:%.*]] = sitofp i32 [[TMP1]] to double 2538 // CHECK9-NEXT: [[ADD:%.*]] = fadd double [[CONV1]], 1.500000e+00 2539 // CHECK9-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0 2540 // CHECK9-NEXT: store double [[ADD]], 
double* [[A]], align 8 2541 // CHECK9-NEXT: ret void 2542 // 2543 // 2544 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l126 2545 // CHECK9-SAME: (%struct.S1* [[THIS:%.*]]) #[[ATTR0]] { 2546 // CHECK9-NEXT: entry: 2547 // CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 2548 // CHECK9-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 2549 // CHECK9-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 2550 // CHECK9-NEXT: [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 2551 // CHECK9-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 1024) 2552 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]]) 2553 // CHECK9-NEXT: ret void 2554 // 2555 // 2556 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..3 2557 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]]) #[[ATTR0]] { 2558 // CHECK9-NEXT: entry: 2559 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 2560 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 2561 // CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 2562 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 2563 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 2564 // CHECK9-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 2565 // CHECK9-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 2566 // CHECK9-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0 2567 // CHECK9-NEXT: store double 2.500000e+00, double* [[A]], align 8 
2568 // CHECK9-NEXT: ret void 2569 // 2570 // 2571 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l88 2572 // CHECK9-SAME: () #[[ATTR0]] { 2573 // CHECK9-NEXT: entry: 2574 // CHECK9-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 2575 // CHECK9-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 20) 2576 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*)) 2577 // CHECK9-NEXT: ret void 2578 // 2579 // 2580 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..4 2581 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] { 2582 // CHECK9-NEXT: entry: 2583 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 2584 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 2585 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 2586 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 2587 // CHECK9-NEXT: ret void 2588 // 2589 // 2590 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l93 2591 // CHECK9-SAME: (i64 [[A:%.*]], i64 [[B:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] { 2592 // CHECK9-NEXT: entry: 2593 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 2594 // CHECK9-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8 2595 // CHECK9-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 2596 // CHECK9-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 2597 // CHECK9-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8 2598 // CHECK9-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 2599 // CHECK9-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 2600 // CHECK9-NEXT: store 
i64 [[B]], i64* [[B_ADDR]], align 8 2601 // CHECK9-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 2602 // CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 2603 // CHECK9-NEXT: [[CONV1:%.*]] = bitcast i64* [[B_ADDR]] to i16* 2604 // CHECK9-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i16* 2605 // CHECK9-NEXT: [[TMP1:%.*]] = load i16, i16* [[CONV2]], align 8 2606 // CHECK9-NEXT: [[TMP2:%.*]] = sext i16 [[TMP1]] to i32 2607 // CHECK9-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 1024) 2608 // CHECK9-NEXT: [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8 2609 // CHECK9-NEXT: [[CONV3:%.*]] = bitcast i64* [[A_CASTED]] to i32* 2610 // CHECK9-NEXT: store i32 [[TMP3]], i32* [[CONV3]], align 4 2611 // CHECK9-NEXT: [[TMP4:%.*]] = load i64, i64* [[A_CASTED]], align 8 2612 // CHECK9-NEXT: [[TMP5:%.*]] = load i16, i16* [[CONV1]], align 8 2613 // CHECK9-NEXT: [[CONV4:%.*]] = bitcast i64* [[B_CASTED]] to i16* 2614 // CHECK9-NEXT: store i16 [[TMP5]], i16* [[CONV4]], align 2 2615 // CHECK9-NEXT: [[TMP6:%.*]] = load i64, i64* [[B_CASTED]], align 8 2616 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i64 [[TMP4]], i64 [[TMP6]]) 2617 // CHECK9-NEXT: ret void 2618 // 2619 // 2620 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..5 2621 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR0]] { 2622 // CHECK9-NEXT: entry: 2623 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 2624 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 2625 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 2626 // CHECK9-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8 2627 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 2628 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 2629 // CHECK9-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 2630 // CHECK9-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8 2631 // CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 2632 // CHECK9-NEXT: [[CONV1:%.*]] = bitcast i64* [[B_ADDR]] to i16* 2633 // CHECK9-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV1]], align 8 2634 // CHECK9-NEXT: [[CONV2:%.*]] = sext i16 [[TMP0]] to i32 2635 // CHECK9-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8 2636 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CONV2]] 2637 // CHECK9-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8 2638 // CHECK9-NEXT: ret void 2639 // 2640 // 2641 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l104 2642 // CHECK10-SAME: (i64 [[DOTCAPTURE_EXPR_:%.*]], i64 [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR0:[0-9]+]] { 2643 // CHECK10-NEXT: entry: 2644 // CHECK10-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 2645 // CHECK10-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca i64, align 8 2646 // CHECK10-NEXT: [[TMP0:%.*]] = call i32 
@__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]]) 2647 // CHECK10-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 2648 // CHECK10-NEXT: store i64 [[DOTCAPTURE_EXPR_1]], i64* [[DOTCAPTURE_EXPR__ADDR2]], align 8 2649 // CHECK10-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32* 2650 // CHECK10-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR2]] to i32* 2651 // CHECK10-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8 2652 // CHECK10-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV3]], align 8 2653 // CHECK10-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]]) 2654 // CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*)) 2655 // CHECK10-NEXT: ret void 2656 // 2657 // 2658 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined. 
2659 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] { 2660 // CHECK10-NEXT: entry: 2661 // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 2662 // CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 2663 // CHECK10-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 2664 // CHECK10-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 2665 // CHECK10-NEXT: ret void 2666 // 2667 // 2668 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l108 2669 // CHECK10-SAME: (i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] { 2670 // CHECK10-NEXT: entry: 2671 // CHECK10-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 2672 // CHECK10-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 2673 // CHECK10-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 2674 // CHECK10-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32* 2675 // CHECK10-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8 2676 // CHECK10-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 [[TMP1]]) 2677 // CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*)) 2678 // CHECK10-NEXT: ret void 2679 // 2680 // 2681 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..1 2682 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] { 2683 // CHECK10-NEXT: entry: 2684 // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 2685 // CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 2686 // CHECK10-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 2687 // CHECK10-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 2688 // CHECK10-NEXT: ret void 2689 // 2690 // 2691 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l121 2692 // CHECK10-SAME: (%struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] { 2693 // CHECK10-NEXT: entry: 2694 // CHECK10-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 2695 // CHECK10-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8 2696 // CHECK10-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 2697 // CHECK10-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8 2698 // CHECK10-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 2699 // CHECK10-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 2700 // CHECK10-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8 2701 // CHECK10-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 2702 // CHECK10-NEXT: [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 2703 // CHECK10-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32* 2704 // CHECK10-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32* 2705 // CHECK10-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV1]], align 8 2706 // CHECK10-NEXT: call void 
@__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 [[TMP2]]) 2707 // CHECK10-NEXT: [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8 2708 // CHECK10-NEXT: [[CONV2:%.*]] = bitcast i64* [[B_CASTED]] to i32* 2709 // CHECK10-NEXT: store i32 [[TMP3]], i32* [[CONV2]], align 4 2710 // CHECK10-NEXT: [[TMP4:%.*]] = load i64, i64* [[B_CASTED]], align 8 2711 // CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64)* @.omp_outlined..2 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]], i64 [[TMP4]]) 2712 // CHECK10-NEXT: ret void 2713 // 2714 // 2715 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..2 2716 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i64 [[B:%.*]]) #[[ATTR0]] { 2717 // CHECK10-NEXT: entry: 2718 // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 2719 // CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 2720 // CHECK10-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 2721 // CHECK10-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8 2722 // CHECK10-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 2723 // CHECK10-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 2724 // CHECK10-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 2725 // CHECK10-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8 2726 // CHECK10-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 2727 // CHECK10-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32* 2728 // CHECK10-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8 2729 // CHECK10-NEXT: [[CONV1:%.*]] = sitofp i32 [[TMP1]] to double 2730 // CHECK10-NEXT: [[ADD:%.*]] = fadd double [[CONV1]], 1.500000e+00 2731 // CHECK10-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], 
%struct.S1* [[TMP0]], i32 0, i32 0 2732 // CHECK10-NEXT: store double [[ADD]], double* [[A]], align 8 2733 // CHECK10-NEXT: ret void 2734 // 2735 // 2736 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l126 2737 // CHECK10-SAME: (%struct.S1* [[THIS:%.*]]) #[[ATTR0]] { 2738 // CHECK10-NEXT: entry: 2739 // CHECK10-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 2740 // CHECK10-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 2741 // CHECK10-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 2742 // CHECK10-NEXT: [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 2743 // CHECK10-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 1024) 2744 // CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]]) 2745 // CHECK10-NEXT: ret void 2746 // 2747 // 2748 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..3 2749 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]]) #[[ATTR0]] { 2750 // CHECK10-NEXT: entry: 2751 // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 2752 // CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 2753 // CHECK10-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 2754 // CHECK10-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 2755 // CHECK10-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 2756 // CHECK10-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 2757 // CHECK10-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 2758 // CHECK10-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], 
%struct.S1* [[TMP0]], i32 0, i32 0 2759 // CHECK10-NEXT: store double 2.500000e+00, double* [[A]], align 8 2760 // CHECK10-NEXT: ret void 2761 // 2762 // 2763 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l88 2764 // CHECK10-SAME: () #[[ATTR0]] { 2765 // CHECK10-NEXT: entry: 2766 // CHECK10-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 2767 // CHECK10-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 20) 2768 // CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*)) 2769 // CHECK10-NEXT: ret void 2770 // 2771 // 2772 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..4 2773 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] { 2774 // CHECK10-NEXT: entry: 2775 // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 2776 // CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 2777 // CHECK10-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 2778 // CHECK10-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 2779 // CHECK10-NEXT: ret void 2780 // 2781 // 2782 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l93 2783 // CHECK10-SAME: (i64 [[A:%.*]], i64 [[B:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] { 2784 // CHECK10-NEXT: entry: 2785 // CHECK10-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 2786 // CHECK10-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8 2787 // CHECK10-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 2788 // CHECK10-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 2789 // CHECK10-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8 2790 // CHECK10-NEXT: [[TMP0:%.*]] = call i32 
@__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 2791 // CHECK10-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 2792 // CHECK10-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8 2793 // CHECK10-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 2794 // CHECK10-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 2795 // CHECK10-NEXT: [[CONV1:%.*]] = bitcast i64* [[B_ADDR]] to i16* 2796 // CHECK10-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i16* 2797 // CHECK10-NEXT: [[TMP1:%.*]] = load i16, i16* [[CONV2]], align 8 2798 // CHECK10-NEXT: [[TMP2:%.*]] = sext i16 [[TMP1]] to i32 2799 // CHECK10-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 1024) 2800 // CHECK10-NEXT: [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8 2801 // CHECK10-NEXT: [[CONV3:%.*]] = bitcast i64* [[A_CASTED]] to i32* 2802 // CHECK10-NEXT: store i32 [[TMP3]], i32* [[CONV3]], align 4 2803 // CHECK10-NEXT: [[TMP4:%.*]] = load i64, i64* [[A_CASTED]], align 8 2804 // CHECK10-NEXT: [[TMP5:%.*]] = load i16, i16* [[CONV1]], align 8 2805 // CHECK10-NEXT: [[CONV4:%.*]] = bitcast i64* [[B_CASTED]] to i16* 2806 // CHECK10-NEXT: store i16 [[TMP5]], i16* [[CONV4]], align 2 2807 // CHECK10-NEXT: [[TMP6:%.*]] = load i64, i64* [[B_CASTED]], align 8 2808 // CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i64 [[TMP4]], i64 [[TMP6]]) 2809 // CHECK10-NEXT: ret void 2810 // 2811 // 2812 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..5 2813 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR0]] { 2814 // CHECK10-NEXT: entry: 2815 // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 2816 // CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 2817 // CHECK10-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 2818 // CHECK10-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8 2819 // CHECK10-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 2820 // CHECK10-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 2821 // CHECK10-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 2822 // CHECK10-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8 2823 // CHECK10-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 2824 // CHECK10-NEXT: [[CONV1:%.*]] = bitcast i64* [[B_ADDR]] to i16* 2825 // CHECK10-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV1]], align 8 2826 // CHECK10-NEXT: [[CONV2:%.*]] = sext i16 [[TMP0]] to i32 2827 // CHECK10-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8 2828 // CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CONV2]] 2829 // CHECK10-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8 2830 // CHECK10-NEXT: ret void 2831 // 2832 // 2833 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l104 2834 // CHECK11-SAME: (i32 [[DOTCAPTURE_EXPR_:%.*]], i32 [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR0:[0-9]+]] { 2835 // CHECK11-NEXT: entry: 2836 // CHECK11-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 2837 // CHECK11-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca i32, align 4 2838 // CHECK11-NEXT: [[TMP0:%.*]] = call i32 
@__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]]) 2839 // CHECK11-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 2840 // CHECK11-NEXT: store i32 [[DOTCAPTURE_EXPR_1]], i32* [[DOTCAPTURE_EXPR__ADDR2]], align 4 2841 // CHECK11-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 2842 // CHECK11-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR2]], align 4 2843 // CHECK11-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]]) 2844 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*)) 2845 // CHECK11-NEXT: ret void 2846 // 2847 // 2848 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined. 2849 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] { 2850 // CHECK11-NEXT: entry: 2851 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 2852 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 2853 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 2854 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 2855 // CHECK11-NEXT: ret void 2856 // 2857 // 2858 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l108 2859 // CHECK11-SAME: (i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] { 2860 // CHECK11-NEXT: entry: 2861 // CHECK11-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 2862 // CHECK11-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 2863 // CHECK11-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 2864 // CHECK11-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 2865 // CHECK11-NEXT: call void 
@__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 [[TMP1]]) 2866 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*)) 2867 // CHECK11-NEXT: ret void 2868 // 2869 // 2870 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..1 2871 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] { 2872 // CHECK11-NEXT: entry: 2873 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 2874 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 2875 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 2876 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 2877 // CHECK11-NEXT: ret void 2878 // 2879 // 2880 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l121 2881 // CHECK11-SAME: (%struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] { 2882 // CHECK11-NEXT: entry: 2883 // CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 2884 // CHECK11-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 2885 // CHECK11-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 2886 // CHECK11-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4 2887 // CHECK11-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 2888 // CHECK11-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 2889 // CHECK11-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 2890 // CHECK11-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 2891 // CHECK11-NEXT: [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 2892 // CHECK11-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 2893 // CHECK11-NEXT: call void 
@__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 [[TMP2]]) 2894 // CHECK11-NEXT: [[TMP3:%.*]] = load i32, i32* [[B_ADDR]], align 4 2895 // CHECK11-NEXT: store i32 [[TMP3]], i32* [[B_CASTED]], align 4 2896 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_CASTED]], align 4 2897 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32)* @.omp_outlined..2 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]], i32 [[TMP4]]) 2898 // CHECK11-NEXT: ret void 2899 // 2900 // 2901 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..2 2902 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i32 [[B:%.*]]) #[[ATTR0]] { 2903 // CHECK11-NEXT: entry: 2904 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 2905 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 2906 // CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 2907 // CHECK11-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 2908 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 2909 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 2910 // CHECK11-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 2911 // CHECK11-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 2912 // CHECK11-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 2913 // CHECK11-NEXT: [[TMP1:%.*]] = load i32, i32* [[B_ADDR]], align 4 2914 // CHECK11-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP1]] to double 2915 // CHECK11-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 2916 // CHECK11-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0 2917 // CHECK11-NEXT: store double [[ADD]], double* [[A]], align 4 2918 // CHECK11-NEXT: ret void 2919 
// 2920 // 2921 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l126 2922 // CHECK11-SAME: (%struct.S1* [[THIS:%.*]]) #[[ATTR0]] { 2923 // CHECK11-NEXT: entry: 2924 // CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 2925 // CHECK11-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 2926 // CHECK11-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 2927 // CHECK11-NEXT: [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 2928 // CHECK11-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 1024) 2929 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]]) 2930 // CHECK11-NEXT: ret void 2931 // 2932 // 2933 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..3 2934 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]]) #[[ATTR0]] { 2935 // CHECK11-NEXT: entry: 2936 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 2937 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 2938 // CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 2939 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 2940 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 2941 // CHECK11-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 2942 // CHECK11-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 2943 // CHECK11-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0 2944 // CHECK11-NEXT: store double 2.500000e+00, double* [[A]], align 4 2945 // CHECK11-NEXT: ret void 2946 
// 2947 // 2948 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l88 2949 // CHECK11-SAME: () #[[ATTR0]] { 2950 // CHECK11-NEXT: entry: 2951 // CHECK11-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 2952 // CHECK11-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 20) 2953 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*)) 2954 // CHECK11-NEXT: ret void 2955 // 2956 // 2957 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..4 2958 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] { 2959 // CHECK11-NEXT: entry: 2960 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 2961 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 2962 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 2963 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 2964 // CHECK11-NEXT: ret void 2965 // 2966 // 2967 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l93 2968 // CHECK11-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] { 2969 // CHECK11-NEXT: entry: 2970 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 2971 // CHECK11-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 2972 // CHECK11-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 2973 // CHECK11-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 2974 // CHECK11-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4 2975 // CHECK11-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 2976 // CHECK11-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 2977 // CHECK11-NEXT: store i32 [[B]], 
i32* [[B_ADDR]], align 4 2978 // CHECK11-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 2979 // CHECK11-NEXT: [[CONV:%.*]] = bitcast i32* [[B_ADDR]] to i16* 2980 // CHECK11-NEXT: [[CONV1:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__ADDR]] to i16* 2981 // CHECK11-NEXT: [[TMP1:%.*]] = load i16, i16* [[CONV1]], align 4 2982 // CHECK11-NEXT: [[TMP2:%.*]] = sext i16 [[TMP1]] to i32 2983 // CHECK11-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 1024) 2984 // CHECK11-NEXT: [[TMP3:%.*]] = load i32, i32* [[A_ADDR]], align 4 2985 // CHECK11-NEXT: store i32 [[TMP3]], i32* [[A_CASTED]], align 4 2986 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[A_CASTED]], align 4 2987 // CHECK11-NEXT: [[TMP5:%.*]] = load i16, i16* [[CONV]], align 4 2988 // CHECK11-NEXT: [[CONV2:%.*]] = bitcast i32* [[B_CASTED]] to i16* 2989 // CHECK11-NEXT: store i16 [[TMP5]], i16* [[CONV2]], align 2 2990 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[B_CASTED]], align 4 2991 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32 [[TMP4]], i32 [[TMP6]]) 2992 // CHECK11-NEXT: ret void 2993 // 2994 // 2995 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..5 2996 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] { 2997 // CHECK11-NEXT: entry: 2998 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 2999 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 3000 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 3001 // CHECK11-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 3002 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 3003 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 3004 // CHECK11-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 3005 // CHECK11-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 3006 // CHECK11-NEXT: [[CONV:%.*]] = bitcast i32* [[B_ADDR]] to i16* 3007 // CHECK11-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 4 3008 // CHECK11-NEXT: [[CONV1:%.*]] = sext i16 [[TMP0]] to i32 3009 // CHECK11-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4 3010 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CONV1]] 3011 // CHECK11-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4 3012 // CHECK11-NEXT: ret void 3013 // 3014 // 3015 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l104 3016 // CHECK12-SAME: (i32 [[DOTCAPTURE_EXPR_:%.*]], i32 [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR0:[0-9]+]] { 3017 // CHECK12-NEXT: entry: 3018 // CHECK12-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 3019 // CHECK12-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca i32, align 4 3020 // CHECK12-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]]) 3021 // 
CHECK12-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 3022 // CHECK12-NEXT: store i32 [[DOTCAPTURE_EXPR_1]], i32* [[DOTCAPTURE_EXPR__ADDR2]], align 4 3023 // CHECK12-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 3024 // CHECK12-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR2]], align 4 3025 // CHECK12-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]]) 3026 // CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*)) 3027 // CHECK12-NEXT: ret void 3028 // 3029 // 3030 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined. 3031 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] { 3032 // CHECK12-NEXT: entry: 3033 // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 3034 // CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 3035 // CHECK12-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 3036 // CHECK12-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 3037 // CHECK12-NEXT: ret void 3038 // 3039 // 3040 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l108 3041 // CHECK12-SAME: (i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] { 3042 // CHECK12-NEXT: entry: 3043 // CHECK12-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 3044 // CHECK12-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 3045 // CHECK12-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 3046 // CHECK12-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 3047 // CHECK12-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 
[[TMP1]]) 3048 // CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*)) 3049 // CHECK12-NEXT: ret void 3050 // 3051 // 3052 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..1 3053 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] { 3054 // CHECK12-NEXT: entry: 3055 // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 3056 // CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 3057 // CHECK12-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 3058 // CHECK12-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 3059 // CHECK12-NEXT: ret void 3060 // 3061 // 3062 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l121 3063 // CHECK12-SAME: (%struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] { 3064 // CHECK12-NEXT: entry: 3065 // CHECK12-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 3066 // CHECK12-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 3067 // CHECK12-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 3068 // CHECK12-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4 3069 // CHECK12-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 3070 // CHECK12-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 3071 // CHECK12-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 3072 // CHECK12-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 3073 // CHECK12-NEXT: [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 3074 // CHECK12-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 3075 // CHECK12-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 
[[TMP2]]) 3076 // CHECK12-NEXT: [[TMP3:%.*]] = load i32, i32* [[B_ADDR]], align 4 3077 // CHECK12-NEXT: store i32 [[TMP3]], i32* [[B_CASTED]], align 4 3078 // CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_CASTED]], align 4 3079 // CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32)* @.omp_outlined..2 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]], i32 [[TMP4]]) 3080 // CHECK12-NEXT: ret void 3081 // 3082 // 3083 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..2 3084 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i32 [[B:%.*]]) #[[ATTR0]] { 3085 // CHECK12-NEXT: entry: 3086 // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 3087 // CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 3088 // CHECK12-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 3089 // CHECK12-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 3090 // CHECK12-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 3091 // CHECK12-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 3092 // CHECK12-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 3093 // CHECK12-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 3094 // CHECK12-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 3095 // CHECK12-NEXT: [[TMP1:%.*]] = load i32, i32* [[B_ADDR]], align 4 3096 // CHECK12-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP1]] to double 3097 // CHECK12-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 3098 // CHECK12-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0 3099 // CHECK12-NEXT: store double [[ADD]], double* [[A]], align 4 3100 // CHECK12-NEXT: ret void 3101 // 3102 // 3103 // CHECK12-LABEL: define 
{{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l126 3104 // CHECK12-SAME: (%struct.S1* [[THIS:%.*]]) #[[ATTR0]] { 3105 // CHECK12-NEXT: entry: 3106 // CHECK12-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 3107 // CHECK12-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 3108 // CHECK12-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 3109 // CHECK12-NEXT: [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 3110 // CHECK12-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 1024) 3111 // CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]]) 3112 // CHECK12-NEXT: ret void 3113 // 3114 // 3115 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..3 3116 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]]) #[[ATTR0]] { 3117 // CHECK12-NEXT: entry: 3118 // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 3119 // CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 3120 // CHECK12-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 3121 // CHECK12-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 3122 // CHECK12-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 3123 // CHECK12-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 3124 // CHECK12-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 3125 // CHECK12-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0 3126 // CHECK12-NEXT: store double 2.500000e+00, double* [[A]], align 4 3127 // CHECK12-NEXT: ret void 3128 // 3129 // 3130 // CHECK12-LABEL: define 
{{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l88 3131 // CHECK12-SAME: () #[[ATTR0]] { 3132 // CHECK12-NEXT: entry: 3133 // CHECK12-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 3134 // CHECK12-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 20) 3135 // CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*)) 3136 // CHECK12-NEXT: ret void 3137 // 3138 // 3139 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..4 3140 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] { 3141 // CHECK12-NEXT: entry: 3142 // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 3143 // CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 3144 // CHECK12-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 3145 // CHECK12-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 3146 // CHECK12-NEXT: ret void 3147 // 3148 // 3149 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l93 3150 // CHECK12-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] { 3151 // CHECK12-NEXT: entry: 3152 // CHECK12-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 3153 // CHECK12-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 3154 // CHECK12-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 3155 // CHECK12-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 3156 // CHECK12-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4 3157 // CHECK12-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 3158 // CHECK12-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 3159 // CHECK12-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 3160 // 
CHECK12-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 3161 // CHECK12-NEXT: [[CONV:%.*]] = bitcast i32* [[B_ADDR]] to i16* 3162 // CHECK12-NEXT: [[CONV1:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__ADDR]] to i16* 3163 // CHECK12-NEXT: [[TMP1:%.*]] = load i16, i16* [[CONV1]], align 4 3164 // CHECK12-NEXT: [[TMP2:%.*]] = sext i16 [[TMP1]] to i32 3165 // CHECK12-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 1024) 3166 // CHECK12-NEXT: [[TMP3:%.*]] = load i32, i32* [[A_ADDR]], align 4 3167 // CHECK12-NEXT: store i32 [[TMP3]], i32* [[A_CASTED]], align 4 3168 // CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[A_CASTED]], align 4 3169 // CHECK12-NEXT: [[TMP5:%.*]] = load i16, i16* [[CONV]], align 4 3170 // CHECK12-NEXT: [[CONV2:%.*]] = bitcast i32* [[B_CASTED]] to i16* 3171 // CHECK12-NEXT: store i16 [[TMP5]], i16* [[CONV2]], align 2 3172 // CHECK12-NEXT: [[TMP6:%.*]] = load i32, i32* [[B_CASTED]], align 4 3173 // CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32 [[TMP4]], i32 [[TMP6]]) 3174 // CHECK12-NEXT: ret void 3175 // 3176 // 3177 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..5 3178 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] { 3179 // CHECK12-NEXT: entry: 3180 // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 3181 // CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 3182 // CHECK12-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 3183 // CHECK12-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 3184 // CHECK12-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 3185 // CHECK12-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 3186 // CHECK12-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 3187 // CHECK12-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 3188 // CHECK12-NEXT: [[CONV:%.*]] = bitcast i32* [[B_ADDR]] to i16* 3189 // CHECK12-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 4 3190 // CHECK12-NEXT: [[CONV1:%.*]] = sext i16 [[TMP0]] to i32 3191 // CHECK12-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4 3192 // CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CONV1]] 3193 // CHECK12-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4 3194 // CHECK12-NEXT: ret void 3195 // 3196 // 3197 // CHECK13-LABEL: define {{[^@]+}}@_Z3bari 3198 // CHECK13-SAME: (i32 signext [[N:%.*]]) #[[ATTR0:[0-9]+]] { 3199 // CHECK13-NEXT: entry: 3200 // CHECK13-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 3201 // CHECK13-NEXT: [[A:%.*]] = alloca i32, align 4 3202 // CHECK13-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8 3203 // CHECK13-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 3204 // CHECK13-NEXT: store i32 0, i32* [[A]], align 4 3205 // CHECK13-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], 
align 4 3206 // CHECK13-NEXT: [[CALL:%.*]] = call signext i32 @_ZN2S12r1Ei(%struct.S1* nonnull dereferenceable(8) [[S]], i32 signext [[TMP0]]) 3207 // CHECK13-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4 3208 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] 3209 // CHECK13-NEXT: store i32 [[ADD]], i32* [[A]], align 4 3210 // CHECK13-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 3211 // CHECK13-NEXT: [[CALL1:%.*]] = call signext i32 @_ZL7fstatici(i32 signext [[TMP2]]) 3212 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 3213 // CHECK13-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] 3214 // CHECK13-NEXT: store i32 [[ADD2]], i32* [[A]], align 4 3215 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 3216 // CHECK13-NEXT: [[CALL3:%.*]] = call signext i32 @_Z9ftemplateIiET_i(i32 signext [[TMP4]]) 3217 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4 3218 // CHECK13-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] 3219 // CHECK13-NEXT: store i32 [[ADD4]], i32* [[A]], align 4 3220 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, i32* [[A]], align 4 3221 // CHECK13-NEXT: ret i32 [[TMP6]] 3222 // 3223 // 3224 // CHECK13-LABEL: define {{[^@]+}}@_ZN2S12r1Ei 3225 // CHECK13-SAME: (%struct.S1* nonnull dereferenceable(8) [[THIS:%.*]], i32 signext [[N:%.*]]) #[[ATTR0]] comdat align 2 { 3226 // CHECK13-NEXT: entry: 3227 // CHECK13-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 3228 // CHECK13-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 3229 // CHECK13-NEXT: [[B:%.*]] = alloca i32, align 4 3230 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 3231 // CHECK13-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 3232 // CHECK13-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 3233 // CHECK13-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 3234 // CHECK13-NEXT: store i32 1, i32* [[B]], align 4 3235 // CHECK13-NEXT: [[TMP0:%.*]] = load i32, i32* 
[[N_ADDR]], align 4 3236 // CHECK13-NEXT: [[TMP1:%.*]] = load i32, i32* [[B]], align 4 3237 // CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP0]], [[TMP1]] 3238 // CHECK13-NEXT: store i32 [[SUB]], i32* [[DOTCAPTURE_EXPR_]], align 4 3239 // CHECK13-NEXT: [[TMP2:%.*]] = load i32, i32* [[B]], align 4 3240 // CHECK13-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP2]] to double 3241 // CHECK13-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 3242 // CHECK13-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0 3243 // CHECK13-NEXT: store double [[ADD]], double* [[A]], align 8 3244 // CHECK13-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 3245 // CHECK13-NEXT: store double 2.500000e+00, double* [[A2]], align 8 3246 // CHECK13-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 3247 // CHECK13-NEXT: [[TMP3:%.*]] = load double, double* [[A3]], align 8 3248 // CHECK13-NEXT: [[CONV4:%.*]] = fptosi double [[TMP3]] to i32 3249 // CHECK13-NEXT: ret i32 [[CONV4]] 3250 // 3251 // 3252 // CHECK13-LABEL: define {{[^@]+}}@_ZL7fstatici 3253 // CHECK13-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] { 3254 // CHECK13-NEXT: entry: 3255 // CHECK13-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 3256 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 3257 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 3258 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4 3259 // CHECK13-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 3260 // CHECK13-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 3261 // CHECK13-NEXT: store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4 3262 // CHECK13-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 3263 // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP1]], 32 3264 // CHECK13-NEXT: store i32 [[MUL]], i32* [[DOTCAPTURE_EXPR_1]], align 4 3265 // CHECK13-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], 
align 4 3266 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 32, [[TMP2]] 3267 // CHECK13-NEXT: store i32 [[ADD]], i32* [[DOTCAPTURE_EXPR_2]], align 4 3268 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4 3269 // CHECK13-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP3]], 1 3270 // CHECK13-NEXT: ret i32 [[ADD3]] 3271 // 3272 // 3273 // CHECK13-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i 3274 // CHECK13-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] comdat { 3275 // CHECK13-NEXT: entry: 3276 // CHECK13-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 3277 // CHECK13-NEXT: [[A:%.*]] = alloca i32, align 4 3278 // CHECK13-NEXT: [[B:%.*]] = alloca i16, align 2 3279 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i16, align 2 3280 // CHECK13-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 3281 // CHECK13-NEXT: store i32 0, i32* [[A]], align 4 3282 // CHECK13-NEXT: store i16 1, i16* [[B]], align 2 3283 // CHECK13-NEXT: [[TMP0:%.*]] = load i16, i16* [[B]], align 2 3284 // CHECK13-NEXT: store i16 [[TMP0]], i16* [[DOTCAPTURE_EXPR_]], align 2 3285 // CHECK13-NEXT: [[TMP1:%.*]] = load i16, i16* [[B]], align 2 3286 // CHECK13-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32 3287 // CHECK13-NEXT: [[TMP2:%.*]] = load i32, i32* [[A]], align 4 3288 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], [[CONV]] 3289 // CHECK13-NEXT: store i32 [[ADD]], i32* [[A]], align 4 3290 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 3291 // CHECK13-NEXT: ret i32 [[TMP3]] 3292 // 3293 // 3294 // CHECK14-LABEL: define {{[^@]+}}@_Z3bari 3295 // CHECK14-SAME: (i32 signext [[N:%.*]]) #[[ATTR0:[0-9]+]] { 3296 // CHECK14-NEXT: entry: 3297 // CHECK14-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 3298 // CHECK14-NEXT: [[A:%.*]] = alloca i32, align 4 3299 // CHECK14-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8 3300 // CHECK14-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 3301 // CHECK14-NEXT: store i32 0, i32* [[A]], align 4 3302 // CHECK14-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], 
align 4 3303 // CHECK14-NEXT: [[CALL:%.*]] = call signext i32 @_ZN2S12r1Ei(%struct.S1* nonnull dereferenceable(8) [[S]], i32 signext [[TMP0]]) 3304 // CHECK14-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4 3305 // CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] 3306 // CHECK14-NEXT: store i32 [[ADD]], i32* [[A]], align 4 3307 // CHECK14-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 3308 // CHECK14-NEXT: [[CALL1:%.*]] = call signext i32 @_ZL7fstatici(i32 signext [[TMP2]]) 3309 // CHECK14-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 3310 // CHECK14-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] 3311 // CHECK14-NEXT: store i32 [[ADD2]], i32* [[A]], align 4 3312 // CHECK14-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 3313 // CHECK14-NEXT: [[CALL3:%.*]] = call signext i32 @_Z9ftemplateIiET_i(i32 signext [[TMP4]]) 3314 // CHECK14-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4 3315 // CHECK14-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] 3316 // CHECK14-NEXT: store i32 [[ADD4]], i32* [[A]], align 4 3317 // CHECK14-NEXT: [[TMP6:%.*]] = load i32, i32* [[A]], align 4 3318 // CHECK14-NEXT: ret i32 [[TMP6]] 3319 // 3320 // 3321 // CHECK14-LABEL: define {{[^@]+}}@_ZN2S12r1Ei 3322 // CHECK14-SAME: (%struct.S1* nonnull dereferenceable(8) [[THIS:%.*]], i32 signext [[N:%.*]]) #[[ATTR0]] comdat align 2 { 3323 // CHECK14-NEXT: entry: 3324 // CHECK14-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 3325 // CHECK14-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 3326 // CHECK14-NEXT: [[B:%.*]] = alloca i32, align 4 3327 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 3328 // CHECK14-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 3329 // CHECK14-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 3330 // CHECK14-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 3331 // CHECK14-NEXT: store i32 1, i32* [[B]], align 4 3332 // CHECK14-NEXT: [[TMP0:%.*]] = load i32, i32* 
[[N_ADDR]], align 4 3333 // CHECK14-NEXT: [[TMP1:%.*]] = load i32, i32* [[B]], align 4 3334 // CHECK14-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP0]], [[TMP1]] 3335 // CHECK14-NEXT: store i32 [[SUB]], i32* [[DOTCAPTURE_EXPR_]], align 4 3336 // CHECK14-NEXT: [[TMP2:%.*]] = load i32, i32* [[B]], align 4 3337 // CHECK14-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP2]] to double 3338 // CHECK14-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 3339 // CHECK14-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0 3340 // CHECK14-NEXT: store double [[ADD]], double* [[A]], align 8 3341 // CHECK14-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 3342 // CHECK14-NEXT: store double 2.500000e+00, double* [[A2]], align 8 3343 // CHECK14-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 3344 // CHECK14-NEXT: [[TMP3:%.*]] = load double, double* [[A3]], align 8 3345 // CHECK14-NEXT: [[CONV4:%.*]] = fptosi double [[TMP3]] to i32 3346 // CHECK14-NEXT: ret i32 [[CONV4]] 3347 // 3348 // 3349 // CHECK14-LABEL: define {{[^@]+}}@_ZL7fstatici 3350 // CHECK14-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] { 3351 // CHECK14-NEXT: entry: 3352 // CHECK14-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 3353 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 3354 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 3355 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4 3356 // CHECK14-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 3357 // CHECK14-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 3358 // CHECK14-NEXT: store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4 3359 // CHECK14-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 3360 // CHECK14-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP1]], 32 3361 // CHECK14-NEXT: store i32 [[MUL]], i32* [[DOTCAPTURE_EXPR_1]], align 4 3362 // CHECK14-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], 
align 4 3363 // CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 32, [[TMP2]] 3364 // CHECK14-NEXT: store i32 [[ADD]], i32* [[DOTCAPTURE_EXPR_2]], align 4 3365 // CHECK14-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4 3366 // CHECK14-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP3]], 1 3367 // CHECK14-NEXT: ret i32 [[ADD3]] 3368 // 3369 // 3370 // CHECK14-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i 3371 // CHECK14-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] comdat { 3372 // CHECK14-NEXT: entry: 3373 // CHECK14-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 3374 // CHECK14-NEXT: [[A:%.*]] = alloca i32, align 4 3375 // CHECK14-NEXT: [[B:%.*]] = alloca i16, align 2 3376 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i16, align 2 3377 // CHECK14-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 3378 // CHECK14-NEXT: store i32 0, i32* [[A]], align 4 3379 // CHECK14-NEXT: store i16 1, i16* [[B]], align 2 3380 // CHECK14-NEXT: [[TMP0:%.*]] = load i16, i16* [[B]], align 2 3381 // CHECK14-NEXT: store i16 [[TMP0]], i16* [[DOTCAPTURE_EXPR_]], align 2 3382 // CHECK14-NEXT: [[TMP1:%.*]] = load i16, i16* [[B]], align 2 3383 // CHECK14-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32 3384 // CHECK14-NEXT: [[TMP2:%.*]] = load i32, i32* [[A]], align 4 3385 // CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], [[CONV]] 3386 // CHECK14-NEXT: store i32 [[ADD]], i32* [[A]], align 4 3387 // CHECK14-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 3388 // CHECK14-NEXT: ret i32 [[TMP3]] 3389 // 3390 // 3391 // CHECK15-LABEL: define {{[^@]+}}@_Z3bari 3392 // CHECK15-SAME: (i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] { 3393 // CHECK15-NEXT: entry: 3394 // CHECK15-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 3395 // CHECK15-NEXT: [[A:%.*]] = alloca i32, align 4 3396 // CHECK15-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4 3397 // CHECK15-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 3398 // CHECK15-NEXT: store i32 0, i32* [[A]], align 4 3399 // CHECK15-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 3400 
// CHECK15-NEXT: [[CALL:%.*]] = call i32 @_ZN2S12r1Ei(%struct.S1* nonnull dereferenceable(8) [[S]], i32 [[TMP0]]) 3401 // CHECK15-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4 3402 // CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] 3403 // CHECK15-NEXT: store i32 [[ADD]], i32* [[A]], align 4 3404 // CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 3405 // CHECK15-NEXT: [[CALL1:%.*]] = call i32 @_ZL7fstatici(i32 [[TMP2]]) 3406 // CHECK15-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 3407 // CHECK15-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] 3408 // CHECK15-NEXT: store i32 [[ADD2]], i32* [[A]], align 4 3409 // CHECK15-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 3410 // CHECK15-NEXT: [[CALL3:%.*]] = call i32 @_Z9ftemplateIiET_i(i32 [[TMP4]]) 3411 // CHECK15-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4 3412 // CHECK15-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] 3413 // CHECK15-NEXT: store i32 [[ADD4]], i32* [[A]], align 4 3414 // CHECK15-NEXT: [[TMP6:%.*]] = load i32, i32* [[A]], align 4 3415 // CHECK15-NEXT: ret i32 [[TMP6]] 3416 // 3417 // 3418 // CHECK15-LABEL: define {{[^@]+}}@_ZN2S12r1Ei 3419 // CHECK15-SAME: (%struct.S1* nonnull dereferenceable(8) [[THIS:%.*]], i32 [[N:%.*]]) #[[ATTR0]] comdat align 2 { 3420 // CHECK15-NEXT: entry: 3421 // CHECK15-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 3422 // CHECK15-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 3423 // CHECK15-NEXT: [[B:%.*]] = alloca i32, align 4 3424 // CHECK15-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 3425 // CHECK15-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 3426 // CHECK15-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 3427 // CHECK15-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 3428 // CHECK15-NEXT: store i32 1, i32* [[B]], align 4 3429 // CHECK15-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 3430 // CHECK15-NEXT: [[TMP1:%.*]] = load i32, i32* 
[[B]], align 4 3431 // CHECK15-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP0]], [[TMP1]] 3432 // CHECK15-NEXT: store i32 [[SUB]], i32* [[DOTCAPTURE_EXPR_]], align 4 3433 // CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[B]], align 4 3434 // CHECK15-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP2]] to double 3435 // CHECK15-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 3436 // CHECK15-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0 3437 // CHECK15-NEXT: store double [[ADD]], double* [[A]], align 4 3438 // CHECK15-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 3439 // CHECK15-NEXT: store double 2.500000e+00, double* [[A2]], align 4 3440 // CHECK15-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 3441 // CHECK15-NEXT: [[TMP3:%.*]] = load double, double* [[A3]], align 4 3442 // CHECK15-NEXT: [[CONV4:%.*]] = fptosi double [[TMP3]] to i32 3443 // CHECK15-NEXT: ret i32 [[CONV4]] 3444 // 3445 // 3446 // CHECK15-LABEL: define {{[^@]+}}@_ZL7fstatici 3447 // CHECK15-SAME: (i32 [[N:%.*]]) #[[ATTR0]] { 3448 // CHECK15-NEXT: entry: 3449 // CHECK15-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 3450 // CHECK15-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 3451 // CHECK15-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 3452 // CHECK15-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4 3453 // CHECK15-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 3454 // CHECK15-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 3455 // CHECK15-NEXT: store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4 3456 // CHECK15-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 3457 // CHECK15-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP1]], 32 3458 // CHECK15-NEXT: store i32 [[MUL]], i32* [[DOTCAPTURE_EXPR_1]], align 4 3459 // CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 3460 // CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 32, [[TMP2]] 3461 // 
CHECK15-NEXT: store i32 [[ADD]], i32* [[DOTCAPTURE_EXPR_2]], align 4 3462 // CHECK15-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4 3463 // CHECK15-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP3]], 1 3464 // CHECK15-NEXT: ret i32 [[ADD3]] 3465 // 3466 // 3467 // CHECK15-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i 3468 // CHECK15-SAME: (i32 [[N:%.*]]) #[[ATTR0]] comdat { 3469 // CHECK15-NEXT: entry: 3470 // CHECK15-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 3471 // CHECK15-NEXT: [[A:%.*]] = alloca i32, align 4 3472 // CHECK15-NEXT: [[B:%.*]] = alloca i16, align 2 3473 // CHECK15-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i16, align 2 3474 // CHECK15-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 3475 // CHECK15-NEXT: store i32 0, i32* [[A]], align 4 3476 // CHECK15-NEXT: store i16 1, i16* [[B]], align 2 3477 // CHECK15-NEXT: [[TMP0:%.*]] = load i16, i16* [[B]], align 2 3478 // CHECK15-NEXT: store i16 [[TMP0]], i16* [[DOTCAPTURE_EXPR_]], align 2 3479 // CHECK15-NEXT: [[TMP1:%.*]] = load i16, i16* [[B]], align 2 3480 // CHECK15-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32 3481 // CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[A]], align 4 3482 // CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], [[CONV]] 3483 // CHECK15-NEXT: store i32 [[ADD]], i32* [[A]], align 4 3484 // CHECK15-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 3485 // CHECK15-NEXT: ret i32 [[TMP3]] 3486 // 3487 // 3488 // CHECK16-LABEL: define {{[^@]+}}@_Z3bari 3489 // CHECK16-SAME: (i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] { 3490 // CHECK16-NEXT: entry: 3491 // CHECK16-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 3492 // CHECK16-NEXT: [[A:%.*]] = alloca i32, align 4 3493 // CHECK16-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4 3494 // CHECK16-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 3495 // CHECK16-NEXT: store i32 0, i32* [[A]], align 4 3496 // CHECK16-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 3497 // CHECK16-NEXT: [[CALL:%.*]] = call i32 @_ZN2S12r1Ei(%struct.S1* nonnull 
dereferenceable(8) [[S]], i32 [[TMP0]]) 3498 // CHECK16-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4 3499 // CHECK16-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] 3500 // CHECK16-NEXT: store i32 [[ADD]], i32* [[A]], align 4 3501 // CHECK16-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 3502 // CHECK16-NEXT: [[CALL1:%.*]] = call i32 @_ZL7fstatici(i32 [[TMP2]]) 3503 // CHECK16-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 3504 // CHECK16-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] 3505 // CHECK16-NEXT: store i32 [[ADD2]], i32* [[A]], align 4 3506 // CHECK16-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 3507 // CHECK16-NEXT: [[CALL3:%.*]] = call i32 @_Z9ftemplateIiET_i(i32 [[TMP4]]) 3508 // CHECK16-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4 3509 // CHECK16-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] 3510 // CHECK16-NEXT: store i32 [[ADD4]], i32* [[A]], align 4 3511 // CHECK16-NEXT: [[TMP6:%.*]] = load i32, i32* [[A]], align 4 3512 // CHECK16-NEXT: ret i32 [[TMP6]] 3513 // 3514 // 3515 // CHECK16-LABEL: define {{[^@]+}}@_ZN2S12r1Ei 3516 // CHECK16-SAME: (%struct.S1* nonnull dereferenceable(8) [[THIS:%.*]], i32 [[N:%.*]]) #[[ATTR0]] comdat align 2 { 3517 // CHECK16-NEXT: entry: 3518 // CHECK16-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 3519 // CHECK16-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 3520 // CHECK16-NEXT: [[B:%.*]] = alloca i32, align 4 3521 // CHECK16-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 3522 // CHECK16-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 3523 // CHECK16-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 3524 // CHECK16-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 3525 // CHECK16-NEXT: store i32 1, i32* [[B]], align 4 3526 // CHECK16-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 3527 // CHECK16-NEXT: [[TMP1:%.*]] = load i32, i32* [[B]], align 4 3528 // CHECK16-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP0]], 
[[TMP1]] 3529 // CHECK16-NEXT: store i32 [[SUB]], i32* [[DOTCAPTURE_EXPR_]], align 4 3530 // CHECK16-NEXT: [[TMP2:%.*]] = load i32, i32* [[B]], align 4 3531 // CHECK16-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP2]] to double 3532 // CHECK16-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 3533 // CHECK16-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0 3534 // CHECK16-NEXT: store double [[ADD]], double* [[A]], align 4 3535 // CHECK16-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 3536 // CHECK16-NEXT: store double 2.500000e+00, double* [[A2]], align 4 3537 // CHECK16-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 3538 // CHECK16-NEXT: [[TMP3:%.*]] = load double, double* [[A3]], align 4 3539 // CHECK16-NEXT: [[CONV4:%.*]] = fptosi double [[TMP3]] to i32 3540 // CHECK16-NEXT: ret i32 [[CONV4]] 3541 // 3542 // 3543 // CHECK16-LABEL: define {{[^@]+}}@_ZL7fstatici 3544 // CHECK16-SAME: (i32 [[N:%.*]]) #[[ATTR0]] { 3545 // CHECK16-NEXT: entry: 3546 // CHECK16-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 3547 // CHECK16-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 3548 // CHECK16-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 3549 // CHECK16-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4 3550 // CHECK16-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 3551 // CHECK16-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 3552 // CHECK16-NEXT: store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4 3553 // CHECK16-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 3554 // CHECK16-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP1]], 32 3555 // CHECK16-NEXT: store i32 [[MUL]], i32* [[DOTCAPTURE_EXPR_1]], align 4 3556 // CHECK16-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 3557 // CHECK16-NEXT: [[ADD:%.*]] = add nsw i32 32, [[TMP2]] 3558 // CHECK16-NEXT: store i32 [[ADD]], i32* [[DOTCAPTURE_EXPR_2]], align 4 3559 // 
CHECK16-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4 3560 // CHECK16-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP3]], 1 3561 // CHECK16-NEXT: ret i32 [[ADD3]] 3562 // 3563 // 3564 // CHECK16-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i 3565 // CHECK16-SAME: (i32 [[N:%.*]]) #[[ATTR0]] comdat { 3566 // CHECK16-NEXT: entry: 3567 // CHECK16-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 3568 // CHECK16-NEXT: [[A:%.*]] = alloca i32, align 4 3569 // CHECK16-NEXT: [[B:%.*]] = alloca i16, align 2 3570 // CHECK16-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i16, align 2 3571 // CHECK16-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 3572 // CHECK16-NEXT: store i32 0, i32* [[A]], align 4 3573 // CHECK16-NEXT: store i16 1, i16* [[B]], align 2 3574 // CHECK16-NEXT: [[TMP0:%.*]] = load i16, i16* [[B]], align 2 3575 // CHECK16-NEXT: store i16 [[TMP0]], i16* [[DOTCAPTURE_EXPR_]], align 2 3576 // CHECK16-NEXT: [[TMP1:%.*]] = load i16, i16* [[B]], align 2 3577 // CHECK16-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32 3578 // CHECK16-NEXT: [[TMP2:%.*]] = load i32, i32* [[A]], align 4 3579 // CHECK16-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], [[CONV]] 3580 // CHECK16-NEXT: store i32 [[ADD]], i32* [[A]], align 4 3581 // CHECK16-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 3582 // CHECK16-NEXT: ret i32 [[TMP3]] 3583 // 3584 // 3585 // CHECK17-LABEL: define {{[^@]+}}@_Z3bari 3586 // CHECK17-SAME: (i32 signext [[N:%.*]]) #[[ATTR0:[0-9]+]] { 3587 // CHECK17-NEXT: entry: 3588 // CHECK17-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 3589 // CHECK17-NEXT: [[A:%.*]] = alloca i32, align 4 3590 // CHECK17-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8 3591 // CHECK17-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 3592 // CHECK17-NEXT: store i32 0, i32* [[A]], align 4 3593 // CHECK17-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 3594 // CHECK17-NEXT: [[CALL:%.*]] = call signext i32 @_ZN2S12r1Ei(%struct.S1* nonnull dereferenceable(8) [[S]], i32 signext [[TMP0]]) 3595 // CHECK17-NEXT: 
[[TMP1:%.*]] = load i32, i32* [[A]], align 4 3596 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] 3597 // CHECK17-NEXT: store i32 [[ADD]], i32* [[A]], align 4 3598 // CHECK17-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 3599 // CHECK17-NEXT: [[CALL1:%.*]] = call signext i32 @_ZL7fstatici(i32 signext [[TMP2]]) 3600 // CHECK17-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 3601 // CHECK17-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] 3602 // CHECK17-NEXT: store i32 [[ADD2]], i32* [[A]], align 4 3603 // CHECK17-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 3604 // CHECK17-NEXT: [[CALL3:%.*]] = call signext i32 @_Z9ftemplateIiET_i(i32 signext [[TMP4]]) 3605 // CHECK17-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4 3606 // CHECK17-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] 3607 // CHECK17-NEXT: store i32 [[ADD4]], i32* [[A]], align 4 3608 // CHECK17-NEXT: [[TMP6:%.*]] = load i32, i32* [[A]], align 4 3609 // CHECK17-NEXT: ret i32 [[TMP6]] 3610 // 3611 // 3612 // CHECK17-LABEL: define {{[^@]+}}@_ZN2S12r1Ei 3613 // CHECK17-SAME: (%struct.S1* nonnull dereferenceable(8) [[THIS:%.*]], i32 signext [[N:%.*]]) #[[ATTR0]] comdat align 2 { 3614 // CHECK17-NEXT: entry: 3615 // CHECK17-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 3616 // CHECK17-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 3617 // CHECK17-NEXT: [[B:%.*]] = alloca i32, align 4 3618 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 3619 // CHECK17-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8 3620 // CHECK17-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 3621 // CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 3622 // CHECK17-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 3623 // CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 3624 // CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [1 x i8*], align 8 3625 // CHECK17-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [1 x i8*], 
align 8 3626 // CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [1 x i8*], align 8 3627 // CHECK17-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 3628 // CHECK17-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 3629 // CHECK17-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 3630 // CHECK17-NEXT: store i32 1, i32* [[B]], align 4 3631 // CHECK17-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 3632 // CHECK17-NEXT: [[TMP1:%.*]] = load i32, i32* [[B]], align 4 3633 // CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP0]], [[TMP1]] 3634 // CHECK17-NEXT: store i32 [[SUB]], i32* [[DOTCAPTURE_EXPR_]], align 4 3635 // CHECK17-NEXT: [[TMP2:%.*]] = load i32, i32* [[B]], align 4 3636 // CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[B_CASTED]] to i32* 3637 // CHECK17-NEXT: store i32 [[TMP2]], i32* [[CONV]], align 4 3638 // CHECK17-NEXT: [[TMP3:%.*]] = load i64, i64* [[B_CASTED]], align 8 3639 // CHECK17-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 3640 // CHECK17-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* 3641 // CHECK17-NEXT: store i32 [[TMP4]], i32* [[CONV2]], align 4 3642 // CHECK17-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 3643 // CHECK17-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0 3644 // CHECK17-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 3645 // CHECK17-NEXT: [[TMP7:%.*]] = bitcast i8** [[TMP6]] to %struct.S1** 3646 // CHECK17-NEXT: store %struct.S1* [[THIS1]], %struct.S1** [[TMP7]], align 8 3647 // CHECK17-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 3648 // CHECK17-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to double** 3649 // CHECK17-NEXT: store double* [[A]], double** [[TMP9]], align 8 3650 // CHECK17-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* 
[[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 3651 // CHECK17-NEXT: store i8* null, i8** [[TMP10]], align 8 3652 // CHECK17-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 3653 // CHECK17-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* 3654 // CHECK17-NEXT: store i64 [[TMP3]], i64* [[TMP12]], align 8 3655 // CHECK17-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 3656 // CHECK17-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* 3657 // CHECK17-NEXT: store i64 [[TMP3]], i64* [[TMP14]], align 8 3658 // CHECK17-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 3659 // CHECK17-NEXT: store i8* null, i8** [[TMP15]], align 8 3660 // CHECK17-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 3661 // CHECK17-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64* 3662 // CHECK17-NEXT: store i64 [[TMP5]], i64* [[TMP17]], align 8 3663 // CHECK17-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 3664 // CHECK17-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* 3665 // CHECK17-NEXT: store i64 [[TMP5]], i64* [[TMP19]], align 8 3666 // CHECK17-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 3667 // CHECK17-NEXT: store i8* null, i8** [[TMP20]], align 8 3668 // CHECK17-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 3669 // CHECK17-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 3670 // CHECK17-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 3671 // CHECK17-NEXT: [[TMP24:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l121.region_id, i32 3, 
i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 [[TMP23]]) 3672 // CHECK17-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0 3673 // CHECK17-NEXT: br i1 [[TMP25]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 3674 // CHECK17: omp_offload.failed: 3675 // CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l121(%struct.S1* [[THIS1]], i64 [[TMP3]], i64 [[TMP5]]) #[[ATTR2:[0-9]+]] 3676 // CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT]] 3677 // CHECK17: omp_offload.cont: 3678 // CHECK17-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 3679 // CHECK17-NEXT: [[TMP26:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 3680 // CHECK17-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to %struct.S1** 3681 // CHECK17-NEXT: store %struct.S1* [[THIS1]], %struct.S1** [[TMP27]], align 8 3682 // CHECK17-NEXT: [[TMP28:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 3683 // CHECK17-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to double** 3684 // CHECK17-NEXT: store double* [[A3]], double** [[TMP29]], align 8 3685 // CHECK17-NEXT: [[TMP30:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i64 0, i64 0 3686 // CHECK17-NEXT: store i8* null, i8** [[TMP30]], align 8 3687 // CHECK17-NEXT: [[TMP31:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 3688 // CHECK17-NEXT: [[TMP32:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 3689 // CHECK17-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l126.region_id, i32 1, i8** [[TMP31]], i8** [[TMP32]], 
i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1024) 3690 // CHECK17-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 3691 // CHECK17-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED7:%.*]], label [[OMP_OFFLOAD_CONT8:%.*]] 3692 // CHECK17: omp_offload.failed7: 3693 // CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l126(%struct.S1* [[THIS1]]) #[[ATTR2]] 3694 // CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT8]] 3695 // CHECK17: omp_offload.cont8: 3696 // CHECK17-NEXT: [[A9:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 3697 // CHECK17-NEXT: [[TMP35:%.*]] = load double, double* [[A9]], align 8 3698 // CHECK17-NEXT: [[CONV10:%.*]] = fptosi double [[TMP35]] to i32 3699 // CHECK17-NEXT: ret i32 [[CONV10]] 3700 // 3701 // 3702 // CHECK17-LABEL: define {{[^@]+}}@_ZL7fstatici 3703 // CHECK17-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] { 3704 // CHECK17-NEXT: entry: 3705 // CHECK17-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 3706 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 3707 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 3708 // CHECK17-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 3709 // CHECK17-NEXT: [[DOTCAPTURE_EXPR__CASTED2:%.*]] = alloca i64, align 8 3710 // CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x i8*], align 8 3711 // CHECK17-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x i8*], align 8 3712 // CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x i8*], align 8 3713 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_4:%.*]] = alloca i32, align 4 3714 // CHECK17-NEXT: [[DOTCAPTURE_EXPR__CASTED5:%.*]] = alloca i64, align 8 3715 // CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS7:%.*]] = alloca [1 x i8*], align 8 3716 // CHECK17-NEXT: [[DOTOFFLOAD_PTRS8:%.*]] = alloca [1 x i8*], align 8 3717 // CHECK17-NEXT: 
[[DOTOFFLOAD_MAPPERS9:%.*]] = alloca [1 x i8*], align 8 3718 // CHECK17-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 3719 // CHECK17-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 3720 // CHECK17-NEXT: store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4 3721 // CHECK17-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 3722 // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP1]], 32 3723 // CHECK17-NEXT: store i32 [[MUL]], i32* [[DOTCAPTURE_EXPR_1]], align 4 3724 // CHECK17-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 3725 // CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* 3726 // CHECK17-NEXT: store i32 [[TMP2]], i32* [[CONV]], align 4 3727 // CHECK17-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 3728 // CHECK17-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 3729 // CHECK17-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED2]] to i32* 3730 // CHECK17-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 3731 // CHECK17-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED2]], align 8 3732 // CHECK17-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 3733 // CHECK17-NEXT: [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i64* 3734 // CHECK17-NEXT: store i64 [[TMP3]], i64* [[TMP7]], align 8 3735 // CHECK17-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 3736 // CHECK17-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* 3737 // CHECK17-NEXT: store i64 [[TMP3]], i64* [[TMP9]], align 8 3738 // CHECK17-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 3739 // CHECK17-NEXT: store i8* null, i8** [[TMP10]], align 8 3740 // CHECK17-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 3741 // CHECK17-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* 
3742 // CHECK17-NEXT: store i64 [[TMP5]], i64* [[TMP12]], align 8 3743 // CHECK17-NEXT: [[TMP13:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 3744 // CHECK17-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* 3745 // CHECK17-NEXT: store i64 [[TMP5]], i64* [[TMP14]], align 8 3746 // CHECK17-NEXT: [[TMP15:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 3747 // CHECK17-NEXT: store i8* null, i8** [[TMP15]], align 8 3748 // CHECK17-NEXT: [[TMP16:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 3749 // CHECK17-NEXT: [[TMP17:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 3750 // CHECK17-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 3751 // CHECK17-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 3752 // CHECK17-NEXT: [[TMP20:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l104.region_id, i32 2, i8** [[TMP16]], i8** [[TMP17]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 [[TMP18]], i32 [[TMP19]]) 3753 // CHECK17-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0 3754 // CHECK17-NEXT: br i1 [[TMP21]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 3755 // CHECK17: omp_offload.failed: 3756 // CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l104(i64 [[TMP3]], i64 [[TMP5]]) #[[ATTR2]] 3757 // CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT]] 3758 // CHECK17: omp_offload.cont: 3759 // CHECK17-NEXT: [[TMP22:%.*]] = load i32, i32* [[N_ADDR]], align 4 3760 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 32, [[TMP22]] 3761 // CHECK17-NEXT: store i32 [[ADD]], i32* [[DOTCAPTURE_EXPR_4]], align 4 3762 // 
CHECK17-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4 3763 // CHECK17-NEXT: [[CONV6:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED5]] to i32* 3764 // CHECK17-NEXT: store i32 [[TMP23]], i32* [[CONV6]], align 4 3765 // CHECK17-NEXT: [[TMP24:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED5]], align 8 3766 // CHECK17-NEXT: [[TMP25:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 3767 // CHECK17-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i64* 3768 // CHECK17-NEXT: store i64 [[TMP24]], i64* [[TMP26]], align 8 3769 // CHECK17-NEXT: [[TMP27:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 3770 // CHECK17-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64* 3771 // CHECK17-NEXT: store i64 [[TMP24]], i64* [[TMP28]], align 8 3772 // CHECK17-NEXT: [[TMP29:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 0 3773 // CHECK17-NEXT: store i8* null, i8** [[TMP29]], align 8 3774 // CHECK17-NEXT: [[TMP30:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 3775 // CHECK17-NEXT: [[TMP31:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 3776 // CHECK17-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4 3777 // CHECK17-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l108.region_id, i32 1, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 [[TMP32]]) 3778 // CHECK17-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 3779 // CHECK17-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED10:%.*]], label [[OMP_OFFLOAD_CONT11:%.*]] 3780 // CHECK17: omp_offload.failed10: 3781 
// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l108(i64 [[TMP24]]) #[[ATTR2]] 3782 // CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT11]] 3783 // CHECK17: omp_offload.cont11: 3784 // CHECK17-NEXT: [[TMP35:%.*]] = load i32, i32* [[N_ADDR]], align 4 3785 // CHECK17-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP35]], 1 3786 // CHECK17-NEXT: ret i32 [[ADD12]] 3787 // 3788 // 3789 // CHECK17-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i 3790 // CHECK17-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] comdat { 3791 // CHECK17-NEXT: entry: 3792 // CHECK17-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 3793 // CHECK17-NEXT: [[A:%.*]] = alloca i32, align 4 3794 // CHECK17-NEXT: [[B:%.*]] = alloca i16, align 2 3795 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i16, align 2 3796 // CHECK17-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 3797 // CHECK17-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8 3798 // CHECK17-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 3799 // CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 3800 // CHECK17-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 3801 // CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 3802 // CHECK17-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 3803 // CHECK17-NEXT: store i32 0, i32* [[A]], align 4 3804 // CHECK17-NEXT: [[TMP0:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l88.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 20) 3805 // CHECK17-NEXT: [[TMP1:%.*]] = icmp ne i32 [[TMP0]], 0 3806 // CHECK17-NEXT: br i1 [[TMP1]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 3807 // CHECK17: omp_offload.failed: 3808 // CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l88() #[[ATTR2]] 3809 // CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT]] 3810 // CHECK17: 
omp_offload.cont: 3811 // CHECK17-NEXT: store i16 1, i16* [[B]], align 2 3812 // CHECK17-NEXT: [[TMP2:%.*]] = load i16, i16* [[B]], align 2 3813 // CHECK17-NEXT: store i16 [[TMP2]], i16* [[DOTCAPTURE_EXPR_]], align 2 3814 // CHECK17-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 3815 // CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32* 3816 // CHECK17-NEXT: store i32 [[TMP3]], i32* [[CONV]], align 4 3817 // CHECK17-NEXT: [[TMP4:%.*]] = load i64, i64* [[A_CASTED]], align 8 3818 // CHECK17-NEXT: [[TMP5:%.*]] = load i16, i16* [[B]], align 2 3819 // CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[B_CASTED]] to i16* 3820 // CHECK17-NEXT: store i16 [[TMP5]], i16* [[CONV1]], align 2 3821 // CHECK17-NEXT: [[TMP6:%.*]] = load i64, i64* [[B_CASTED]], align 8 3822 // CHECK17-NEXT: [[TMP7:%.*]] = load i16, i16* [[DOTCAPTURE_EXPR_]], align 2 3823 // CHECK17-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i16* 3824 // CHECK17-NEXT: store i16 [[TMP7]], i16* [[CONV2]], align 2 3825 // CHECK17-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 3826 // CHECK17-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 3827 // CHECK17-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i64* 3828 // CHECK17-NEXT: store i64 [[TMP4]], i64* [[TMP10]], align 8 3829 // CHECK17-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 3830 // CHECK17-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* 3831 // CHECK17-NEXT: store i64 [[TMP4]], i64* [[TMP12]], align 8 3832 // CHECK17-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 3833 // CHECK17-NEXT: store i8* null, i8** [[TMP13]], align 8 3834 // CHECK17-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 3835 // CHECK17-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* 3836 // CHECK17-NEXT: 
store i64 [[TMP6]], i64* [[TMP15]], align 8 3837 // CHECK17-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 3838 // CHECK17-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64* 3839 // CHECK17-NEXT: store i64 [[TMP6]], i64* [[TMP17]], align 8 3840 // CHECK17-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 3841 // CHECK17-NEXT: store i8* null, i8** [[TMP18]], align 8 3842 // CHECK17-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 3843 // CHECK17-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64* 3844 // CHECK17-NEXT: store i64 [[TMP8]], i64* [[TMP20]], align 8 3845 // CHECK17-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 3846 // CHECK17-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i64* 3847 // CHECK17-NEXT: store i64 [[TMP8]], i64* [[TMP22]], align 8 3848 // CHECK17-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 3849 // CHECK17-NEXT: store i8* null, i8** [[TMP23]], align 8 3850 // CHECK17-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 3851 // CHECK17-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 3852 // CHECK17-NEXT: [[TMP26:%.*]] = load i16, i16* [[DOTCAPTURE_EXPR_]], align 2 3853 // CHECK17-NEXT: [[TMP27:%.*]] = sext i16 [[TMP26]] to i32 3854 // CHECK17-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l93.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 [[TMP27]], i32 
1024) 3855 // CHECK17-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 3856 // CHECK17-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] 3857 // CHECK17: omp_offload.failed3: 3858 // CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l93(i64 [[TMP4]], i64 [[TMP6]], i64 [[TMP8]]) #[[ATTR2]] 3859 // CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT4]] 3860 // CHECK17: omp_offload.cont4: 3861 // CHECK17-NEXT: [[TMP30:%.*]] = load i32, i32* [[A]], align 4 3862 // CHECK17-NEXT: ret i32 [[TMP30]] 3863 // 3864 // 3865 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l121 3866 // CHECK17-SAME: (%struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1:[0-9]+]] { 3867 // CHECK17-NEXT: entry: 3868 // CHECK17-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 3869 // CHECK17-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8 3870 // CHECK17-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 3871 // CHECK17-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8 3872 // CHECK17-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 3873 // CHECK17-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 3874 // CHECK17-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8 3875 // CHECK17-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 3876 // CHECK17-NEXT: [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 3877 // CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32* 3878 // CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32* 3879 // CHECK17-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV1]], align 8 3880 // CHECK17-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 [[TMP2]]) 3881 // CHECK17-NEXT: [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8 3882 // CHECK17-NEXT: [[CONV2:%.*]] = bitcast i64* [[B_CASTED]] to 
i32* 3883 // CHECK17-NEXT: store i32 [[TMP3]], i32* [[CONV2]], align 4 3884 // CHECK17-NEXT: [[TMP4:%.*]] = load i64, i64* [[B_CASTED]], align 8 3885 // CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]], i64 [[TMP4]]) 3886 // CHECK17-NEXT: ret void 3887 // 3888 // 3889 // CHECK17-LABEL: define {{[^@]+}}@.omp_outlined. 3890 // CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i64 [[B:%.*]]) #[[ATTR1]] { 3891 // CHECK17-NEXT: entry: 3892 // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 3893 // CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 3894 // CHECK17-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 3895 // CHECK17-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8 3896 // CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 3897 // CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 3898 // CHECK17-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 3899 // CHECK17-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8 3900 // CHECK17-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 3901 // CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32* 3902 // CHECK17-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8 3903 // CHECK17-NEXT: [[CONV1:%.*]] = sitofp i32 [[TMP1]] to double 3904 // CHECK17-NEXT: [[ADD:%.*]] = fadd double [[CONV1]], 1.500000e+00 3905 // CHECK17-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0 3906 // CHECK17-NEXT: store double [[ADD]], double* [[A]], align 8 3907 // CHECK17-NEXT: ret void 3908 // 3909 // 3910 // CHECK17-LABEL: define 
{{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l126 3911 // CHECK17-SAME: (%struct.S1* [[THIS:%.*]]) #[[ATTR1]] { 3912 // CHECK17-NEXT: entry: 3913 // CHECK17-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 3914 // CHECK17-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 3915 // CHECK17-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 3916 // CHECK17-NEXT: [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 3917 // CHECK17-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 1024) 3918 // CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]]) 3919 // CHECK17-NEXT: ret void 3920 // 3921 // 3922 // CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..1 3923 // CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]]) #[[ATTR1]] { 3924 // CHECK17-NEXT: entry: 3925 // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 3926 // CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 3927 // CHECK17-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 3928 // CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 3929 // CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 3930 // CHECK17-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 3931 // CHECK17-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 3932 // CHECK17-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0 3933 // CHECK17-NEXT: store double 2.500000e+00, double* [[A]], align 8 3934 // CHECK17-NEXT: ret void 3935 // 3936 // 3937 // CHECK17-LABEL: define 
{{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l104 3938 // CHECK17-SAME: (i64 [[DOTCAPTURE_EXPR_:%.*]], i64 [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR1]] { 3939 // CHECK17-NEXT: entry: 3940 // CHECK17-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 3941 // CHECK17-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca i64, align 8 3942 // CHECK17-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 3943 // CHECK17-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 3944 // CHECK17-NEXT: store i64 [[DOTCAPTURE_EXPR_1]], i64* [[DOTCAPTURE_EXPR__ADDR2]], align 8 3945 // CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32* 3946 // CHECK17-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR2]] to i32* 3947 // CHECK17-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8 3948 // CHECK17-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV3]], align 8 3949 // CHECK17-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]]) 3950 // CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*)) 3951 // CHECK17-NEXT: ret void 3952 // 3953 // 3954 // CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..4 3955 // CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 3956 // CHECK17-NEXT: entry: 3957 // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 3958 // CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 3959 // CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 3960 // CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 3961 // CHECK17-NEXT: ret void 3962 // 3963 // 3964 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l108 3965 // CHECK17-SAME: (i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] { 3966 // CHECK17-NEXT: entry: 3967 // CHECK17-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 3968 // CHECK17-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 3969 // CHECK17-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 3970 // CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32* 3971 // CHECK17-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8 3972 // CHECK17-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 [[TMP1]]) 3973 // CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..7 to void (i32*, i32*, ...)*)) 3974 // CHECK17-NEXT: ret void 3975 // 3976 // 3977 // CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..7 3978 // CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 3979 // CHECK17-NEXT: entry: 3980 // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 3981 // CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 3982 // CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 3983 // CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 3984 // CHECK17-NEXT: ret void 3985 // 3986 // 3987 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l88 3988 // CHECK17-SAME: () #[[ATTR1]] { 3989 // CHECK17-NEXT: entry: 3990 // CHECK17-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 3991 // CHECK17-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 20) 3992 // CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*)) 3993 // CHECK17-NEXT: ret void 3994 // 3995 // 3996 // CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..10 3997 // CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 3998 // CHECK17-NEXT: entry: 3999 // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 4000 // CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 4001 // CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 4002 // CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 4003 // CHECK17-NEXT: ret void 4004 // 4005 // 4006 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l93 4007 // CHECK17-SAME: (i64 [[A:%.*]], i64 [[B:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] { 4008 // CHECK17-NEXT: entry: 4009 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 4010 // CHECK17-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8 4011 // CHECK17-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 4012 // CHECK17-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 4013 // CHECK17-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8 4014 // CHECK17-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 4015 // CHECK17-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 4016 // CHECK17-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8 4017 // CHECK17-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 4018 // CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 4019 // CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[B_ADDR]] to i16* 4020 // CHECK17-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i16* 4021 // CHECK17-NEXT: [[TMP1:%.*]] = load i16, i16* [[CONV2]], align 8 4022 // CHECK17-NEXT: [[TMP2:%.*]] = sext i16 [[TMP1]] to i32 4023 
// CHECK17-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 1024) 4024 // CHECK17-NEXT: [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8 4025 // CHECK17-NEXT: [[CONV3:%.*]] = bitcast i64* [[A_CASTED]] to i32* 4026 // CHECK17-NEXT: store i32 [[TMP3]], i32* [[CONV3]], align 4 4027 // CHECK17-NEXT: [[TMP4:%.*]] = load i64, i64* [[A_CASTED]], align 8 4028 // CHECK17-NEXT: [[TMP5:%.*]] = load i16, i16* [[CONV1]], align 8 4029 // CHECK17-NEXT: [[CONV4:%.*]] = bitcast i64* [[B_CASTED]] to i16* 4030 // CHECK17-NEXT: store i16 [[TMP5]], i16* [[CONV4]], align 2 4031 // CHECK17-NEXT: [[TMP6:%.*]] = load i64, i64* [[B_CASTED]], align 8 4032 // CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP4]], i64 [[TMP6]]) 4033 // CHECK17-NEXT: ret void 4034 // 4035 // 4036 // CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..11 4037 // CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR1]] { 4038 // CHECK17-NEXT: entry: 4039 // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 4040 // CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 4041 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 4042 // CHECK17-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8 4043 // CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 4044 // CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 4045 // CHECK17-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 4046 // CHECK17-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8 4047 // CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 4048 // CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[B_ADDR]] to i16* 4049 // CHECK17-NEXT: [[TMP0:%.*]] = load i16, i16* 
[[CONV1]], align 8 4050 // CHECK17-NEXT: [[CONV2:%.*]] = sext i16 [[TMP0]] to i32 4051 // CHECK17-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8 4052 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CONV2]] 4053 // CHECK17-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8 4054 // CHECK17-NEXT: ret void 4055 // 4056 // 4057 // CHECK17-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg 4058 // CHECK17-SAME: () #[[ATTR3:[0-9]+]] { 4059 // CHECK17-NEXT: entry: 4060 // CHECK17-NEXT: call void @__tgt_register_requires(i64 1) 4061 // CHECK17-NEXT: ret void 4062 // 4063 // 4064 // CHECK18-LABEL: define {{[^@]+}}@_Z3bari 4065 // CHECK18-SAME: (i32 signext [[N:%.*]]) #[[ATTR0:[0-9]+]] { 4066 // CHECK18-NEXT: entry: 4067 // CHECK18-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 4068 // CHECK18-NEXT: [[A:%.*]] = alloca i32, align 4 4069 // CHECK18-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8 4070 // CHECK18-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 4071 // CHECK18-NEXT: store i32 0, i32* [[A]], align 4 4072 // CHECK18-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 4073 // CHECK18-NEXT: [[CALL:%.*]] = call signext i32 @_ZN2S12r1Ei(%struct.S1* nonnull dereferenceable(8) [[S]], i32 signext [[TMP0]]) 4074 // CHECK18-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4 4075 // CHECK18-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] 4076 // CHECK18-NEXT: store i32 [[ADD]], i32* [[A]], align 4 4077 // CHECK18-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 4078 // CHECK18-NEXT: [[CALL1:%.*]] = call signext i32 @_ZL7fstatici(i32 signext [[TMP2]]) 4079 // CHECK18-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 4080 // CHECK18-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] 4081 // CHECK18-NEXT: store i32 [[ADD2]], i32* [[A]], align 4 4082 // CHECK18-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 4083 // CHECK18-NEXT: [[CALL3:%.*]] = call signext i32 @_Z9ftemplateIiET_i(i32 signext [[TMP4]]) 4084 // CHECK18-NEXT: [[TMP5:%.*]] = 
load i32, i32* [[A]], align 4 4085 // CHECK18-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] 4086 // CHECK18-NEXT: store i32 [[ADD4]], i32* [[A]], align 4 4087 // CHECK18-NEXT: [[TMP6:%.*]] = load i32, i32* [[A]], align 4 4088 // CHECK18-NEXT: ret i32 [[TMP6]] 4089 // 4090 // 4091 // CHECK18-LABEL: define {{[^@]+}}@_ZN2S12r1Ei 4092 // CHECK18-SAME: (%struct.S1* nonnull dereferenceable(8) [[THIS:%.*]], i32 signext [[N:%.*]]) #[[ATTR0]] comdat align 2 { 4093 // CHECK18-NEXT: entry: 4094 // CHECK18-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 4095 // CHECK18-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 4096 // CHECK18-NEXT: [[B:%.*]] = alloca i32, align 4 4097 // CHECK18-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 4098 // CHECK18-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8 4099 // CHECK18-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 4100 // CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 4101 // CHECK18-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 4102 // CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 4103 // CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [1 x i8*], align 8 4104 // CHECK18-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [1 x i8*], align 8 4105 // CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [1 x i8*], align 8 4106 // CHECK18-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 4107 // CHECK18-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 4108 // CHECK18-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 4109 // CHECK18-NEXT: store i32 1, i32* [[B]], align 4 4110 // CHECK18-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 4111 // CHECK18-NEXT: [[TMP1:%.*]] = load i32, i32* [[B]], align 4 4112 // CHECK18-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP0]], [[TMP1]] 4113 // CHECK18-NEXT: store i32 [[SUB]], i32* [[DOTCAPTURE_EXPR_]], align 4 4114 // CHECK18-NEXT: [[TMP2:%.*]] = load i32, i32* [[B]], align 4 4115 // 
CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[B_CASTED]] to i32* 4116 // CHECK18-NEXT: store i32 [[TMP2]], i32* [[CONV]], align 4 4117 // CHECK18-NEXT: [[TMP3:%.*]] = load i64, i64* [[B_CASTED]], align 8 4118 // CHECK18-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 4119 // CHECK18-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* 4120 // CHECK18-NEXT: store i32 [[TMP4]], i32* [[CONV2]], align 4 4121 // CHECK18-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 4122 // CHECK18-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0 4123 // CHECK18-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 4124 // CHECK18-NEXT: [[TMP7:%.*]] = bitcast i8** [[TMP6]] to %struct.S1** 4125 // CHECK18-NEXT: store %struct.S1* [[THIS1]], %struct.S1** [[TMP7]], align 8 4126 // CHECK18-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 4127 // CHECK18-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to double** 4128 // CHECK18-NEXT: store double* [[A]], double** [[TMP9]], align 8 4129 // CHECK18-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 4130 // CHECK18-NEXT: store i8* null, i8** [[TMP10]], align 8 4131 // CHECK18-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 4132 // CHECK18-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* 4133 // CHECK18-NEXT: store i64 [[TMP3]], i64* [[TMP12]], align 8 4134 // CHECK18-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 4135 // CHECK18-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* 4136 // CHECK18-NEXT: store i64 [[TMP3]], i64* [[TMP14]], align 8 4137 // CHECK18-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 4138 // 
CHECK18-NEXT: store i8* null, i8** [[TMP15]], align 8 4139 // CHECK18-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 4140 // CHECK18-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64* 4141 // CHECK18-NEXT: store i64 [[TMP5]], i64* [[TMP17]], align 8 4142 // CHECK18-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 4143 // CHECK18-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* 4144 // CHECK18-NEXT: store i64 [[TMP5]], i64* [[TMP19]], align 8 4145 // CHECK18-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 4146 // CHECK18-NEXT: store i8* null, i8** [[TMP20]], align 8 4147 // CHECK18-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 4148 // CHECK18-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 4149 // CHECK18-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 4150 // CHECK18-NEXT: [[TMP24:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l121.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 [[TMP23]]) 4151 // CHECK18-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0 4152 // CHECK18-NEXT: br i1 [[TMP25]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 4153 // CHECK18: omp_offload.failed: 4154 // CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l121(%struct.S1* [[THIS1]], i64 [[TMP3]], i64 [[TMP5]]) #[[ATTR2:[0-9]+]] 4155 // CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT]] 4156 // CHECK18: omp_offload.cont: 4157 // CHECK18-NEXT: [[A3:%.*]] = 
getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 4158 // CHECK18-NEXT: [[TMP26:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 4159 // CHECK18-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to %struct.S1** 4160 // CHECK18-NEXT: store %struct.S1* [[THIS1]], %struct.S1** [[TMP27]], align 8 4161 // CHECK18-NEXT: [[TMP28:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 4162 // CHECK18-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to double** 4163 // CHECK18-NEXT: store double* [[A3]], double** [[TMP29]], align 8 4164 // CHECK18-NEXT: [[TMP30:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i64 0, i64 0 4165 // CHECK18-NEXT: store i8* null, i8** [[TMP30]], align 8 4166 // CHECK18-NEXT: [[TMP31:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 4167 // CHECK18-NEXT: [[TMP32:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 4168 // CHECK18-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l126.region_id, i32 1, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1024) 4169 // CHECK18-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 4170 // CHECK18-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED7:%.*]], label [[OMP_OFFLOAD_CONT8:%.*]] 4171 // CHECK18: omp_offload.failed7: 4172 // CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l126(%struct.S1* [[THIS1]]) #[[ATTR2]] 4173 // CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT8]] 4174 // CHECK18: omp_offload.cont8: 4175 // CHECK18-NEXT: [[A9:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 
0, i32 0 4176 // CHECK18-NEXT: [[TMP35:%.*]] = load double, double* [[A9]], align 8 4177 // CHECK18-NEXT: [[CONV10:%.*]] = fptosi double [[TMP35]] to i32 4178 // CHECK18-NEXT: ret i32 [[CONV10]] 4179 // 4180 // 4181 // CHECK18-LABEL: define {{[^@]+}}@_ZL7fstatici 4182 // CHECK18-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] { 4183 // CHECK18-NEXT: entry: 4184 // CHECK18-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 4185 // CHECK18-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 4186 // CHECK18-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 4187 // CHECK18-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 4188 // CHECK18-NEXT: [[DOTCAPTURE_EXPR__CASTED2:%.*]] = alloca i64, align 8 4189 // CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x i8*], align 8 4190 // CHECK18-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x i8*], align 8 4191 // CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x i8*], align 8 4192 // CHECK18-NEXT: [[DOTCAPTURE_EXPR_4:%.*]] = alloca i32, align 4 4193 // CHECK18-NEXT: [[DOTCAPTURE_EXPR__CASTED5:%.*]] = alloca i64, align 8 4194 // CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS7:%.*]] = alloca [1 x i8*], align 8 4195 // CHECK18-NEXT: [[DOTOFFLOAD_PTRS8:%.*]] = alloca [1 x i8*], align 8 4196 // CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS9:%.*]] = alloca [1 x i8*], align 8 4197 // CHECK18-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 4198 // CHECK18-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 4199 // CHECK18-NEXT: store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4 4200 // CHECK18-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 4201 // CHECK18-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP1]], 32 4202 // CHECK18-NEXT: store i32 [[MUL]], i32* [[DOTCAPTURE_EXPR_1]], align 4 4203 // CHECK18-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 4204 // CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* 4205 // CHECK18-NEXT: store i32 [[TMP2]], i32* [[CONV]], align 4 4206 // CHECK18-NEXT: 
[[TMP3:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 4207 // CHECK18-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 4208 // CHECK18-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED2]] to i32* 4209 // CHECK18-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 4210 // CHECK18-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED2]], align 8 4211 // CHECK18-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 4212 // CHECK18-NEXT: [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i64* 4213 // CHECK18-NEXT: store i64 [[TMP3]], i64* [[TMP7]], align 8 4214 // CHECK18-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 4215 // CHECK18-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* 4216 // CHECK18-NEXT: store i64 [[TMP3]], i64* [[TMP9]], align 8 4217 // CHECK18-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 4218 // CHECK18-NEXT: store i8* null, i8** [[TMP10]], align 8 4219 // CHECK18-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 4220 // CHECK18-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* 4221 // CHECK18-NEXT: store i64 [[TMP5]], i64* [[TMP12]], align 8 4222 // CHECK18-NEXT: [[TMP13:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 4223 // CHECK18-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* 4224 // CHECK18-NEXT: store i64 [[TMP5]], i64* [[TMP14]], align 8 4225 // CHECK18-NEXT: [[TMP15:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 4226 // CHECK18-NEXT: store i8* null, i8** [[TMP15]], align 8 4227 // CHECK18-NEXT: [[TMP16:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 4228 // CHECK18-NEXT: [[TMP17:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, 
i32 0 4229 // CHECK18-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 4230 // CHECK18-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 4231 // CHECK18-NEXT: [[TMP20:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l104.region_id, i32 2, i8** [[TMP16]], i8** [[TMP17]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 [[TMP18]], i32 [[TMP19]]) 4232 // CHECK18-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0 4233 // CHECK18-NEXT: br i1 [[TMP21]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 4234 // CHECK18: omp_offload.failed: 4235 // CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l104(i64 [[TMP3]], i64 [[TMP5]]) #[[ATTR2]] 4236 // CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT]] 4237 // CHECK18: omp_offload.cont: 4238 // CHECK18-NEXT: [[TMP22:%.*]] = load i32, i32* [[N_ADDR]], align 4 4239 // CHECK18-NEXT: [[ADD:%.*]] = add nsw i32 32, [[TMP22]] 4240 // CHECK18-NEXT: store i32 [[ADD]], i32* [[DOTCAPTURE_EXPR_4]], align 4 4241 // CHECK18-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4 4242 // CHECK18-NEXT: [[CONV6:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED5]] to i32* 4243 // CHECK18-NEXT: store i32 [[TMP23]], i32* [[CONV6]], align 4 4244 // CHECK18-NEXT: [[TMP24:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED5]], align 8 4245 // CHECK18-NEXT: [[TMP25:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 4246 // CHECK18-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i64* 4247 // CHECK18-NEXT: store i64 [[TMP24]], i64* [[TMP26]], align 8 4248 // CHECK18-NEXT: [[TMP27:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 4249 // CHECK18-NEXT: [[TMP28:%.*]] 
= bitcast i8** [[TMP27]] to i64* 4250 // CHECK18-NEXT: store i64 [[TMP24]], i64* [[TMP28]], align 8 4251 // CHECK18-NEXT: [[TMP29:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 0 4252 // CHECK18-NEXT: store i8* null, i8** [[TMP29]], align 8 4253 // CHECK18-NEXT: [[TMP30:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 4254 // CHECK18-NEXT: [[TMP31:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 4255 // CHECK18-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4 4256 // CHECK18-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l108.region_id, i32 1, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 [[TMP32]]) 4257 // CHECK18-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 4258 // CHECK18-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED10:%.*]], label [[OMP_OFFLOAD_CONT11:%.*]] 4259 // CHECK18: omp_offload.failed10: 4260 // CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l108(i64 [[TMP24]]) #[[ATTR2]] 4261 // CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT11]] 4262 // CHECK18: omp_offload.cont11: 4263 // CHECK18-NEXT: [[TMP35:%.*]] = load i32, i32* [[N_ADDR]], align 4 4264 // CHECK18-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP35]], 1 4265 // CHECK18-NEXT: ret i32 [[ADD12]] 4266 // 4267 // 4268 // CHECK18-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i 4269 // CHECK18-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] comdat { 4270 // CHECK18-NEXT: entry: 4271 // CHECK18-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 4272 // CHECK18-NEXT: [[A:%.*]] = alloca i32, align 4 4273 // CHECK18-NEXT: [[B:%.*]] = alloca i16, align 2 4274 // 
CHECK18-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i16, align 2 4275 // CHECK18-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 4276 // CHECK18-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8 4277 // CHECK18-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 4278 // CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 4279 // CHECK18-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 4280 // CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 4281 // CHECK18-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 4282 // CHECK18-NEXT: store i32 0, i32* [[A]], align 4 4283 // CHECK18-NEXT: [[TMP0:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l88.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 20) 4284 // CHECK18-NEXT: [[TMP1:%.*]] = icmp ne i32 [[TMP0]], 0 4285 // CHECK18-NEXT: br i1 [[TMP1]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 4286 // CHECK18: omp_offload.failed: 4287 // CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l88() #[[ATTR2]] 4288 // CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT]] 4289 // CHECK18: omp_offload.cont: 4290 // CHECK18-NEXT: store i16 1, i16* [[B]], align 2 4291 // CHECK18-NEXT: [[TMP2:%.*]] = load i16, i16* [[B]], align 2 4292 // CHECK18-NEXT: store i16 [[TMP2]], i16* [[DOTCAPTURE_EXPR_]], align 2 4293 // CHECK18-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 4294 // CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32* 4295 // CHECK18-NEXT: store i32 [[TMP3]], i32* [[CONV]], align 4 4296 // CHECK18-NEXT: [[TMP4:%.*]] = load i64, i64* [[A_CASTED]], align 8 4297 // CHECK18-NEXT: [[TMP5:%.*]] = load i16, i16* [[B]], align 2 4298 // CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[B_CASTED]] to i16* 4299 // CHECK18-NEXT: store i16 [[TMP5]], i16* [[CONV1]], align 2 4300 // CHECK18-NEXT: [[TMP6:%.*]] 
= load i64, i64* [[B_CASTED]], align 8 4301 // CHECK18-NEXT: [[TMP7:%.*]] = load i16, i16* [[DOTCAPTURE_EXPR_]], align 2 4302 // CHECK18-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i16* 4303 // CHECK18-NEXT: store i16 [[TMP7]], i16* [[CONV2]], align 2 4304 // CHECK18-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 4305 // CHECK18-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 4306 // CHECK18-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i64* 4307 // CHECK18-NEXT: store i64 [[TMP4]], i64* [[TMP10]], align 8 4308 // CHECK18-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 4309 // CHECK18-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* 4310 // CHECK18-NEXT: store i64 [[TMP4]], i64* [[TMP12]], align 8 4311 // CHECK18-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 4312 // CHECK18-NEXT: store i8* null, i8** [[TMP13]], align 8 4313 // CHECK18-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 4314 // CHECK18-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* 4315 // CHECK18-NEXT: store i64 [[TMP6]], i64* [[TMP15]], align 8 4316 // CHECK18-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 4317 // CHECK18-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64* 4318 // CHECK18-NEXT: store i64 [[TMP6]], i64* [[TMP17]], align 8 4319 // CHECK18-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 4320 // CHECK18-NEXT: store i8* null, i8** [[TMP18]], align 8 4321 // CHECK18-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 4322 // CHECK18-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64* 4323 // CHECK18-NEXT: store i64 [[TMP8]], i64* [[TMP20]], align 8 
4324 // CHECK18-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 4325 // CHECK18-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i64* 4326 // CHECK18-NEXT: store i64 [[TMP8]], i64* [[TMP22]], align 8 4327 // CHECK18-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 4328 // CHECK18-NEXT: store i8* null, i8** [[TMP23]], align 8 4329 // CHECK18-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 4330 // CHECK18-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 4331 // CHECK18-NEXT: [[TMP26:%.*]] = load i16, i16* [[DOTCAPTURE_EXPR_]], align 2 4332 // CHECK18-NEXT: [[TMP27:%.*]] = sext i16 [[TMP26]] to i32 4333 // CHECK18-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l93.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 [[TMP27]], i32 1024) 4334 // CHECK18-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 4335 // CHECK18-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] 4336 // CHECK18: omp_offload.failed3: 4337 // CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l93(i64 [[TMP4]], i64 [[TMP6]], i64 [[TMP8]]) #[[ATTR2]] 4338 // CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT4]] 4339 // CHECK18: omp_offload.cont4: 4340 // CHECK18-NEXT: [[TMP30:%.*]] = load i32, i32* [[A]], align 4 4341 // CHECK18-NEXT: ret i32 [[TMP30]] 4342 // 4343 // 4344 // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l121 4345 // CHECK18-SAME: (%struct.S1* [[THIS:%.*]], i64 
[[B:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1:[0-9]+]] { 4346 // CHECK18-NEXT: entry: 4347 // CHECK18-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 4348 // CHECK18-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8 4349 // CHECK18-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 4350 // CHECK18-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8 4351 // CHECK18-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 4352 // CHECK18-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 4353 // CHECK18-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8 4354 // CHECK18-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 4355 // CHECK18-NEXT: [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 4356 // CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32* 4357 // CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32* 4358 // CHECK18-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV1]], align 8 4359 // CHECK18-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 [[TMP2]]) 4360 // CHECK18-NEXT: [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8 4361 // CHECK18-NEXT: [[CONV2:%.*]] = bitcast i64* [[B_CASTED]] to i32* 4362 // CHECK18-NEXT: store i32 [[TMP3]], i32* [[CONV2]], align 4 4363 // CHECK18-NEXT: [[TMP4:%.*]] = load i64, i64* [[B_CASTED]], align 8 4364 // CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]], i64 [[TMP4]]) 4365 // CHECK18-NEXT: ret void 4366 // 4367 // 4368 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined. 
4369 // CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i64 [[B:%.*]]) #[[ATTR1]] { 4370 // CHECK18-NEXT: entry: 4371 // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 4372 // CHECK18-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 4373 // CHECK18-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 4374 // CHECK18-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8 4375 // CHECK18-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 4376 // CHECK18-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 4377 // CHECK18-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 4378 // CHECK18-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8 4379 // CHECK18-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 4380 // CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32* 4381 // CHECK18-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8 4382 // CHECK18-NEXT: [[CONV1:%.*]] = sitofp i32 [[TMP1]] to double 4383 // CHECK18-NEXT: [[ADD:%.*]] = fadd double [[CONV1]], 1.500000e+00 4384 // CHECK18-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0 4385 // CHECK18-NEXT: store double [[ADD]], double* [[A]], align 8 4386 // CHECK18-NEXT: ret void 4387 // 4388 // 4389 // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l126 4390 // CHECK18-SAME: (%struct.S1* [[THIS:%.*]]) #[[ATTR1]] { 4391 // CHECK18-NEXT: entry: 4392 // CHECK18-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 4393 // CHECK18-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 4394 // CHECK18-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 4395 // CHECK18-NEXT: [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 4396 // CHECK18-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* 
@[[GLOB1]], i32 [[TMP0]], i32 0, i32 1024) 4397 // CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]]) 4398 // CHECK18-NEXT: ret void 4399 // 4400 // 4401 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..1 4402 // CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]]) #[[ATTR1]] { 4403 // CHECK18-NEXT: entry: 4404 // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 4405 // CHECK18-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 4406 // CHECK18-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 4407 // CHECK18-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 4408 // CHECK18-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 4409 // CHECK18-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 4410 // CHECK18-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 4411 // CHECK18-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0 4412 // CHECK18-NEXT: store double 2.500000e+00, double* [[A]], align 8 4413 // CHECK18-NEXT: ret void 4414 // 4415 // 4416 // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l104 4417 // CHECK18-SAME: (i64 [[DOTCAPTURE_EXPR_:%.*]], i64 [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR1]] { 4418 // CHECK18-NEXT: entry: 4419 // CHECK18-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 4420 // CHECK18-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca i64, align 8 4421 // CHECK18-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 4422 // CHECK18-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 4423 // CHECK18-NEXT: store i64 
[[DOTCAPTURE_EXPR_1]], i64* [[DOTCAPTURE_EXPR__ADDR2]], align 8 4424 // CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32* 4425 // CHECK18-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR2]] to i32* 4426 // CHECK18-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8 4427 // CHECK18-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV3]], align 8 4428 // CHECK18-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]]) 4429 // CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*)) 4430 // CHECK18-NEXT: ret void 4431 // 4432 // 4433 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..4 4434 // CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 4435 // CHECK18-NEXT: entry: 4436 // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 4437 // CHECK18-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 4438 // CHECK18-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 4439 // CHECK18-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 4440 // CHECK18-NEXT: ret void 4441 // 4442 // 4443 // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l108 4444 // CHECK18-SAME: (i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] { 4445 // CHECK18-NEXT: entry: 4446 // CHECK18-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 4447 // CHECK18-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 4448 // CHECK18-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 4449 // CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32* 4450 // CHECK18-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8 4451 // CHECK18-NEXT: 
call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 [[TMP1]]) 4452 // CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..7 to void (i32*, i32*, ...)*)) 4453 // CHECK18-NEXT: ret void 4454 // 4455 // 4456 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..7 4457 // CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 4458 // CHECK18-NEXT: entry: 4459 // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 4460 // CHECK18-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 4461 // CHECK18-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 4462 // CHECK18-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 4463 // CHECK18-NEXT: ret void 4464 // 4465 // 4466 // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l88 4467 // CHECK18-SAME: () #[[ATTR1]] { 4468 // CHECK18-NEXT: entry: 4469 // CHECK18-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 4470 // CHECK18-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 20) 4471 // CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*)) 4472 // CHECK18-NEXT: ret void 4473 // 4474 // 4475 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..10 4476 // CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 4477 // CHECK18-NEXT: entry: 4478 // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 4479 // CHECK18-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 4480 // CHECK18-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 4481 // CHECK18-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 4482 // CHECK18-NEXT: ret void 4483 // 4484 // 4485 // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l93 4486 // CHECK18-SAME: (i64 [[A:%.*]], i64 [[B:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] { 4487 // CHECK18-NEXT: entry: 4488 // CHECK18-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 4489 // CHECK18-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8 4490 // CHECK18-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 4491 // CHECK18-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 4492 // CHECK18-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8 4493 // CHECK18-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 4494 // CHECK18-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 4495 // CHECK18-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8 4496 // CHECK18-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 4497 // CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 4498 // CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[B_ADDR]] to i16* 4499 // CHECK18-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i16* 4500 // CHECK18-NEXT: [[TMP1:%.*]] = load i16, i16* [[CONV2]], align 8 4501 // CHECK18-NEXT: [[TMP2:%.*]] = sext i16 [[TMP1]] to i32 4502 
// CHECK18-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 1024) 4503 // CHECK18-NEXT: [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8 4504 // CHECK18-NEXT: [[CONV3:%.*]] = bitcast i64* [[A_CASTED]] to i32* 4505 // CHECK18-NEXT: store i32 [[TMP3]], i32* [[CONV3]], align 4 4506 // CHECK18-NEXT: [[TMP4:%.*]] = load i64, i64* [[A_CASTED]], align 8 4507 // CHECK18-NEXT: [[TMP5:%.*]] = load i16, i16* [[CONV1]], align 8 4508 // CHECK18-NEXT: [[CONV4:%.*]] = bitcast i64* [[B_CASTED]] to i16* 4509 // CHECK18-NEXT: store i16 [[TMP5]], i16* [[CONV4]], align 2 4510 // CHECK18-NEXT: [[TMP6:%.*]] = load i64, i64* [[B_CASTED]], align 8 4511 // CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP4]], i64 [[TMP6]]) 4512 // CHECK18-NEXT: ret void 4513 // 4514 // 4515 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..11 4516 // CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR1]] { 4517 // CHECK18-NEXT: entry: 4518 // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 4519 // CHECK18-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 4520 // CHECK18-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 4521 // CHECK18-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8 4522 // CHECK18-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 4523 // CHECK18-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 4524 // CHECK18-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 4525 // CHECK18-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8 4526 // CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 4527 // CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[B_ADDR]] to i16* 4528 // CHECK18-NEXT: [[TMP0:%.*]] = load i16, i16* 
[[CONV1]], align 8 4529 // CHECK18-NEXT: [[CONV2:%.*]] = sext i16 [[TMP0]] to i32 4530 // CHECK18-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8 4531 // CHECK18-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CONV2]] 4532 // CHECK18-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8 4533 // CHECK18-NEXT: ret void 4534 // 4535 // 4536 // CHECK18-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg 4537 // CHECK18-SAME: () #[[ATTR3:[0-9]+]] { 4538 // CHECK18-NEXT: entry: 4539 // CHECK18-NEXT: call void @__tgt_register_requires(i64 1) 4540 // CHECK18-NEXT: ret void 4541 // 4542 // 4543 // CHECK19-LABEL: define {{[^@]+}}@_Z3bari 4544 // CHECK19-SAME: (i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] { 4545 // CHECK19-NEXT: entry: 4546 // CHECK19-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 4547 // CHECK19-NEXT: [[A:%.*]] = alloca i32, align 4 4548 // CHECK19-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4 4549 // CHECK19-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 4550 // CHECK19-NEXT: store i32 0, i32* [[A]], align 4 4551 // CHECK19-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 4552 // CHECK19-NEXT: [[CALL:%.*]] = call i32 @_ZN2S12r1Ei(%struct.S1* nonnull dereferenceable(8) [[S]], i32 [[TMP0]]) 4553 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4 4554 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] 4555 // CHECK19-NEXT: store i32 [[ADD]], i32* [[A]], align 4 4556 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 4557 // CHECK19-NEXT: [[CALL1:%.*]] = call i32 @_ZL7fstatici(i32 [[TMP2]]) 4558 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 4559 // CHECK19-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] 4560 // CHECK19-NEXT: store i32 [[ADD2]], i32* [[A]], align 4 4561 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 4562 // CHECK19-NEXT: [[CALL3:%.*]] = call i32 @_Z9ftemplateIiET_i(i32 [[TMP4]]) 4563 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4 4564 // CHECK19-NEXT: 
[[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] 4565 // CHECK19-NEXT: store i32 [[ADD4]], i32* [[A]], align 4 4566 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, i32* [[A]], align 4 4567 // CHECK19-NEXT: ret i32 [[TMP6]] 4568 // 4569 // 4570 // CHECK19-LABEL: define {{[^@]+}}@_ZN2S12r1Ei 4571 // CHECK19-SAME: (%struct.S1* nonnull dereferenceable(8) [[THIS:%.*]], i32 [[N:%.*]]) #[[ATTR0]] comdat align 2 { 4572 // CHECK19-NEXT: entry: 4573 // CHECK19-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 4574 // CHECK19-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 4575 // CHECK19-NEXT: [[B:%.*]] = alloca i32, align 4 4576 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 4577 // CHECK19-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4 4578 // CHECK19-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 4579 // CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 4580 // CHECK19-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 4581 // CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 4582 // CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [1 x i8*], align 4 4583 // CHECK19-NEXT: [[DOTOFFLOAD_PTRS4:%.*]] = alloca [1 x i8*], align 4 4584 // CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [1 x i8*], align 4 4585 // CHECK19-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 4586 // CHECK19-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 4587 // CHECK19-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 4588 // CHECK19-NEXT: store i32 1, i32* [[B]], align 4 4589 // CHECK19-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 4590 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[B]], align 4 4591 // CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP0]], [[TMP1]] 4592 // CHECK19-NEXT: store i32 [[SUB]], i32* [[DOTCAPTURE_EXPR_]], align 4 4593 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[B]], align 4 4594 // CHECK19-NEXT: store i32 [[TMP2]], i32* [[B_CASTED]], align 4 
4595 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[B_CASTED]], align 4 4596 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 4597 // CHECK19-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 4598 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 4599 // CHECK19-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0 4600 // CHECK19-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 4601 // CHECK19-NEXT: [[TMP7:%.*]] = bitcast i8** [[TMP6]] to %struct.S1** 4602 // CHECK19-NEXT: store %struct.S1* [[THIS1]], %struct.S1** [[TMP7]], align 4 4603 // CHECK19-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 4604 // CHECK19-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to double** 4605 // CHECK19-NEXT: store double* [[A]], double** [[TMP9]], align 4 4606 // CHECK19-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 4607 // CHECK19-NEXT: store i8* null, i8** [[TMP10]], align 4 4608 // CHECK19-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 4609 // CHECK19-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* 4610 // CHECK19-NEXT: store i32 [[TMP3]], i32* [[TMP12]], align 4 4611 // CHECK19-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 4612 // CHECK19-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* 4613 // CHECK19-NEXT: store i32 [[TMP3]], i32* [[TMP14]], align 4 4614 // CHECK19-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 4615 // CHECK19-NEXT: store i8* null, i8** [[TMP15]], align 4 4616 // CHECK19-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 4617 // CHECK19-NEXT: 
[[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32* 4618 // CHECK19-NEXT: store i32 [[TMP5]], i32* [[TMP17]], align 4 4619 // CHECK19-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 4620 // CHECK19-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* 4621 // CHECK19-NEXT: store i32 [[TMP5]], i32* [[TMP19]], align 4 4622 // CHECK19-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 4623 // CHECK19-NEXT: store i8* null, i8** [[TMP20]], align 4 4624 // CHECK19-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 4625 // CHECK19-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 4626 // CHECK19-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 4627 // CHECK19-NEXT: [[TMP24:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l121.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 [[TMP23]]) 4628 // CHECK19-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0 4629 // CHECK19-NEXT: br i1 [[TMP25]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 4630 // CHECK19: omp_offload.failed: 4631 // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l121(%struct.S1* [[THIS1]], i32 [[TMP3]], i32 [[TMP5]]) #[[ATTR2:[0-9]+]] 4632 // CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT]] 4633 // CHECK19: omp_offload.cont: 4634 // CHECK19-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 4635 // CHECK19-NEXT: [[TMP26:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0 4636 
// CHECK19-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to %struct.S1** 4637 // CHECK19-NEXT: store %struct.S1* [[THIS1]], %struct.S1** [[TMP27]], align 4 4638 // CHECK19-NEXT: [[TMP28:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0 4639 // CHECK19-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to double** 4640 // CHECK19-NEXT: store double* [[A2]], double** [[TMP29]], align 4 4641 // CHECK19-NEXT: [[TMP30:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS5]], i32 0, i32 0 4642 // CHECK19-NEXT: store i8* null, i8** [[TMP30]], align 4 4643 // CHECK19-NEXT: [[TMP31:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0 4644 // CHECK19-NEXT: [[TMP32:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0 4645 // CHECK19-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l126.region_id, i32 1, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1024) 4646 // CHECK19-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 4647 // CHECK19-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED6:%.*]], label [[OMP_OFFLOAD_CONT7:%.*]] 4648 // CHECK19: omp_offload.failed6: 4649 // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l126(%struct.S1* [[THIS1]]) #[[ATTR2]] 4650 // CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT7]] 4651 // CHECK19: omp_offload.cont7: 4652 // CHECK19-NEXT: [[A8:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 4653 // CHECK19-NEXT: [[TMP35:%.*]] = load double, double* [[A8]], align 4 4654 // CHECK19-NEXT: [[CONV:%.*]] = fptosi double [[TMP35]] to i32 4655 // CHECK19-NEXT: ret i32 [[CONV]] 4656 // 
4657 // 4658 // CHECK19-LABEL: define {{[^@]+}}@_ZL7fstatici 4659 // CHECK19-SAME: (i32 [[N:%.*]]) #[[ATTR0]] { 4660 // CHECK19-NEXT: entry: 4661 // CHECK19-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 4662 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 4663 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 4664 // CHECK19-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 4665 // CHECK19-NEXT: [[DOTCAPTURE_EXPR__CASTED2:%.*]] = alloca i32, align 4 4666 // CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x i8*], align 4 4667 // CHECK19-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x i8*], align 4 4668 // CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x i8*], align 4 4669 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4 4670 // CHECK19-NEXT: [[DOTCAPTURE_EXPR__CASTED4:%.*]] = alloca i32, align 4 4671 // CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [1 x i8*], align 4 4672 // CHECK19-NEXT: [[DOTOFFLOAD_PTRS6:%.*]] = alloca [1 x i8*], align 4 4673 // CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [1 x i8*], align 4 4674 // CHECK19-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 4675 // CHECK19-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 4676 // CHECK19-NEXT: store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4 4677 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 4678 // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP1]], 32 4679 // CHECK19-NEXT: store i32 [[MUL]], i32* [[DOTCAPTURE_EXPR_1]], align 4 4680 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 4681 // CHECK19-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 4682 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 4683 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 4684 // CHECK19-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR__CASTED2]], align 4 4685 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* 
[[DOTCAPTURE_EXPR__CASTED2]], align 4 4686 // CHECK19-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 4687 // CHECK19-NEXT: [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i32* 4688 // CHECK19-NEXT: store i32 [[TMP3]], i32* [[TMP7]], align 4 4689 // CHECK19-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 4690 // CHECK19-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32* 4691 // CHECK19-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4 4692 // CHECK19-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 4693 // CHECK19-NEXT: store i8* null, i8** [[TMP10]], align 4 4694 // CHECK19-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 4695 // CHECK19-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* 4696 // CHECK19-NEXT: store i32 [[TMP5]], i32* [[TMP12]], align 4 4697 // CHECK19-NEXT: [[TMP13:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 4698 // CHECK19-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* 4699 // CHECK19-NEXT: store i32 [[TMP5]], i32* [[TMP14]], align 4 4700 // CHECK19-NEXT: [[TMP15:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 4701 // CHECK19-NEXT: store i8* null, i8** [[TMP15]], align 4 4702 // CHECK19-NEXT: [[TMP16:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 4703 // CHECK19-NEXT: [[TMP17:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 4704 // CHECK19-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 4705 // CHECK19-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 4706 // CHECK19-NEXT: [[TMP20:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* 
@.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l104.region_id, i32 2, i8** [[TMP16]], i8** [[TMP17]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 [[TMP18]], i32 [[TMP19]]) 4707 // CHECK19-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0 4708 // CHECK19-NEXT: br i1 [[TMP21]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 4709 // CHECK19: omp_offload.failed: 4710 // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l104(i32 [[TMP3]], i32 [[TMP5]]) #[[ATTR2]] 4711 // CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT]] 4712 // CHECK19: omp_offload.cont: 4713 // CHECK19-NEXT: [[TMP22:%.*]] = load i32, i32* [[N_ADDR]], align 4 4714 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 32, [[TMP22]] 4715 // CHECK19-NEXT: store i32 [[ADD]], i32* [[DOTCAPTURE_EXPR_3]], align 4 4716 // CHECK19-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4 4717 // CHECK19-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR__CASTED4]], align 4 4718 // CHECK19-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED4]], align 4 4719 // CHECK19-NEXT: [[TMP25:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 4720 // CHECK19-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* 4721 // CHECK19-NEXT: store i32 [[TMP24]], i32* [[TMP26]], align 4 4722 // CHECK19-NEXT: [[TMP27:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 4723 // CHECK19-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32* 4724 // CHECK19-NEXT: store i32 [[TMP24]], i32* [[TMP28]], align 4 4725 // CHECK19-NEXT: [[TMP29:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i32 0, i32 0 4726 // CHECK19-NEXT: store i8* null, i8** [[TMP29]], align 4 4727 // CHECK19-NEXT: [[TMP30:%.*]] = getelementptr inbounds [1 x i8*], 
[1 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 4728 // CHECK19-NEXT: [[TMP31:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 4729 // CHECK19-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4 4730 // CHECK19-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l108.region_id, i32 1, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 [[TMP32]]) 4731 // CHECK19-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 4732 // CHECK19-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED8:%.*]], label [[OMP_OFFLOAD_CONT9:%.*]] 4733 // CHECK19: omp_offload.failed8: 4734 // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l108(i32 [[TMP24]]) #[[ATTR2]] 4735 // CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT9]] 4736 // CHECK19: omp_offload.cont9: 4737 // CHECK19-NEXT: [[TMP35:%.*]] = load i32, i32* [[N_ADDR]], align 4 4738 // CHECK19-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP35]], 1 4739 // CHECK19-NEXT: ret i32 [[ADD10]] 4740 // 4741 // 4742 // CHECK19-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i 4743 // CHECK19-SAME: (i32 [[N:%.*]]) #[[ATTR0]] comdat { 4744 // CHECK19-NEXT: entry: 4745 // CHECK19-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 4746 // CHECK19-NEXT: [[A:%.*]] = alloca i32, align 4 4747 // CHECK19-NEXT: [[B:%.*]] = alloca i16, align 2 4748 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i16, align 2 4749 // CHECK19-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 4750 // CHECK19-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4 4751 // CHECK19-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 4752 // CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 4753 // CHECK19-NEXT: 
[[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 4754 // CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 4755 // CHECK19-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 4756 // CHECK19-NEXT: store i32 0, i32* [[A]], align 4 4757 // CHECK19-NEXT: [[TMP0:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l88.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 20) 4758 // CHECK19-NEXT: [[TMP1:%.*]] = icmp ne i32 [[TMP0]], 0 4759 // CHECK19-NEXT: br i1 [[TMP1]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 4760 // CHECK19: omp_offload.failed: 4761 // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l88() #[[ATTR2]] 4762 // CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT]] 4763 // CHECK19: omp_offload.cont: 4764 // CHECK19-NEXT: store i16 1, i16* [[B]], align 2 4765 // CHECK19-NEXT: [[TMP2:%.*]] = load i16, i16* [[B]], align 2 4766 // CHECK19-NEXT: store i16 [[TMP2]], i16* [[DOTCAPTURE_EXPR_]], align 2 4767 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 4768 // CHECK19-NEXT: store i32 [[TMP3]], i32* [[A_CASTED]], align 4 4769 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[A_CASTED]], align 4 4770 // CHECK19-NEXT: [[TMP5:%.*]] = load i16, i16* [[B]], align 2 4771 // CHECK19-NEXT: [[CONV:%.*]] = bitcast i32* [[B_CASTED]] to i16* 4772 // CHECK19-NEXT: store i16 [[TMP5]], i16* [[CONV]], align 2 4773 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, i32* [[B_CASTED]], align 4 4774 // CHECK19-NEXT: [[TMP7:%.*]] = load i16, i16* [[DOTCAPTURE_EXPR_]], align 2 4775 // CHECK19-NEXT: [[CONV1:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__CASTED]] to i16* 4776 // CHECK19-NEXT: store i16 [[TMP7]], i16* [[CONV1]], align 2 4777 // CHECK19-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 4778 // CHECK19-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x 
i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 4779 // CHECK19-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i32* 4780 // CHECK19-NEXT: store i32 [[TMP4]], i32* [[TMP10]], align 4 4781 // CHECK19-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 4782 // CHECK19-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* 4783 // CHECK19-NEXT: store i32 [[TMP4]], i32* [[TMP12]], align 4 4784 // CHECK19-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 4785 // CHECK19-NEXT: store i8* null, i8** [[TMP13]], align 4 4786 // CHECK19-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 4787 // CHECK19-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* 4788 // CHECK19-NEXT: store i32 [[TMP6]], i32* [[TMP15]], align 4 4789 // CHECK19-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 4790 // CHECK19-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32* 4791 // CHECK19-NEXT: store i32 [[TMP6]], i32* [[TMP17]], align 4 4792 // CHECK19-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 4793 // CHECK19-NEXT: store i8* null, i8** [[TMP18]], align 4 4794 // CHECK19-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 4795 // CHECK19-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32* 4796 // CHECK19-NEXT: store i32 [[TMP8]], i32* [[TMP20]], align 4 4797 // CHECK19-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 4798 // CHECK19-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i32* 4799 // CHECK19-NEXT: store i32 [[TMP8]], i32* [[TMP22]], align 4 4800 // CHECK19-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 4801 // CHECK19-NEXT: store i8* null, i8** [[TMP23]], 
align 4 4802 // CHECK19-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 4803 // CHECK19-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 4804 // CHECK19-NEXT: [[TMP26:%.*]] = load i16, i16* [[DOTCAPTURE_EXPR_]], align 2 4805 // CHECK19-NEXT: [[TMP27:%.*]] = sext i16 [[TMP26]] to i32 4806 // CHECK19-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l93.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 [[TMP27]], i32 1024) 4807 // CHECK19-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 4808 // CHECK19-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED2:%.*]], label [[OMP_OFFLOAD_CONT3:%.*]] 4809 // CHECK19: omp_offload.failed2: 4810 // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l93(i32 [[TMP4]], i32 [[TMP6]], i32 [[TMP8]]) #[[ATTR2]] 4811 // CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT3]] 4812 // CHECK19: omp_offload.cont3: 4813 // CHECK19-NEXT: [[TMP30:%.*]] = load i32, i32* [[A]], align 4 4814 // CHECK19-NEXT: ret i32 [[TMP30]] 4815 // 4816 // 4817 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l121 4818 // CHECK19-SAME: (%struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1:[0-9]+]] { 4819 // CHECK19-NEXT: entry: 4820 // CHECK19-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 4821 // CHECK19-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 4822 // CHECK19-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 4823 // CHECK19-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4 4824 // CHECK19-NEXT: [[TMP0:%.*]] = call i32 
@__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 4825 // CHECK19-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 4826 // CHECK19-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 4827 // CHECK19-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 4828 // CHECK19-NEXT: [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 4829 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 4830 // CHECK19-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 [[TMP2]]) 4831 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[B_ADDR]], align 4 4832 // CHECK19-NEXT: store i32 [[TMP3]], i32* [[B_CASTED]], align 4 4833 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_CASTED]], align 4 4834 // CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]], i32 [[TMP4]]) 4835 // CHECK19-NEXT: ret void 4836 // 4837 // 4838 // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined. 
4839 // CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i32 [[B:%.*]]) #[[ATTR1]] { 4840 // CHECK19-NEXT: entry: 4841 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 4842 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 4843 // CHECK19-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 4844 // CHECK19-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 4845 // CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 4846 // CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 4847 // CHECK19-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 4848 // CHECK19-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 4849 // CHECK19-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 4850 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[B_ADDR]], align 4 4851 // CHECK19-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP1]] to double 4852 // CHECK19-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 4853 // CHECK19-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0 4854 // CHECK19-NEXT: store double [[ADD]], double* [[A]], align 4 4855 // CHECK19-NEXT: ret void 4856 // 4857 // 4858 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l126 4859 // CHECK19-SAME: (%struct.S1* [[THIS:%.*]]) #[[ATTR1]] { 4860 // CHECK19-NEXT: entry: 4861 // CHECK19-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 4862 // CHECK19-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 4863 // CHECK19-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 4864 // CHECK19-NEXT: [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 4865 // CHECK19-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 1024) 4866 // CHECK19-NEXT: call void 
(%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]]) 4867 // CHECK19-NEXT: ret void 4868 // 4869 // 4870 // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..1 4871 // CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]]) #[[ATTR1]] { 4872 // CHECK19-NEXT: entry: 4873 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 4874 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 4875 // CHECK19-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 4876 // CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 4877 // CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 4878 // CHECK19-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 4879 // CHECK19-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 4880 // CHECK19-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0 4881 // CHECK19-NEXT: store double 2.500000e+00, double* [[A]], align 4 4882 // CHECK19-NEXT: ret void 4883 // 4884 // 4885 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l104 4886 // CHECK19-SAME: (i32 [[DOTCAPTURE_EXPR_:%.*]], i32 [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR1]] { 4887 // CHECK19-NEXT: entry: 4888 // CHECK19-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 4889 // CHECK19-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca i32, align 4 4890 // CHECK19-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 4891 // CHECK19-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 4892 // CHECK19-NEXT: store i32 [[DOTCAPTURE_EXPR_1]], i32* [[DOTCAPTURE_EXPR__ADDR2]], align 4 4893 // 
CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 4894 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR2]], align 4 4895 // CHECK19-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]]) 4896 // CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*)) 4897 // CHECK19-NEXT: ret void 4898 // 4899 // 4900 // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..4 4901 // CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 4902 // CHECK19-NEXT: entry: 4903 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 4904 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 4905 // CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 4906 // CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 4907 // CHECK19-NEXT: ret void 4908 // 4909 // 4910 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l108 4911 // CHECK19-SAME: (i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] { 4912 // CHECK19-NEXT: entry: 4913 // CHECK19-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 4914 // CHECK19-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 4915 // CHECK19-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 4916 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 4917 // CHECK19-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 [[TMP1]]) 4918 // CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..7 to void (i32*, i32*, ...)*)) 4919 // CHECK19-NEXT: ret void 4920 // 4921 // 4922 // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..7 4923 // CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 4924 // CHECK19-NEXT: entry: 4925 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 4926 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 4927 // CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 4928 // CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 4929 // CHECK19-NEXT: ret void 4930 // 4931 // 4932 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l88 4933 // CHECK19-SAME: () #[[ATTR1]] { 4934 // CHECK19-NEXT: entry: 4935 // CHECK19-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 4936 // CHECK19-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 20) 4937 // CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*)) 4938 // CHECK19-NEXT: ret void 4939 // 4940 // 4941 // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..10 4942 // CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 4943 // CHECK19-NEXT: entry: 4944 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 4945 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 4946 // CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 4947 // CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 4948 // CHECK19-NEXT: ret void 4949 // 4950 // 4951 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l93 4952 // CHECK19-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] { 4953 // CHECK19-NEXT: entry: 4954 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 4955 // CHECK19-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 4956 // CHECK19-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 4957 // CHECK19-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 4958 // CHECK19-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4 4959 // CHECK19-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 4960 // CHECK19-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 4961 // CHECK19-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 4962 // CHECK19-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 4963 // CHECK19-NEXT: [[CONV:%.*]] = bitcast i32* [[B_ADDR]] to i16* 4964 // CHECK19-NEXT: [[CONV1:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__ADDR]] to i16* 4965 // CHECK19-NEXT: [[TMP1:%.*]] = load i16, i16* [[CONV1]], align 4 4966 // CHECK19-NEXT: [[TMP2:%.*]] = sext i16 [[TMP1]] to i32 4967 // CHECK19-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* 
@[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 1024) 4968 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[A_ADDR]], align 4 4969 // CHECK19-NEXT: store i32 [[TMP3]], i32* [[A_CASTED]], align 4 4970 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[A_CASTED]], align 4 4971 // CHECK19-NEXT: [[TMP5:%.*]] = load i16, i16* [[CONV]], align 4 4972 // CHECK19-NEXT: [[CONV2:%.*]] = bitcast i32* [[B_CASTED]] to i16* 4973 // CHECK19-NEXT: store i16 [[TMP5]], i16* [[CONV2]], align 2 4974 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, i32* [[B_CASTED]], align 4 4975 // CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP4]], i32 [[TMP6]]) 4976 // CHECK19-NEXT: ret void 4977 // 4978 // 4979 // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..11 4980 // CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[B:%.*]]) #[[ATTR1]] { 4981 // CHECK19-NEXT: entry: 4982 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 4983 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 4984 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 4985 // CHECK19-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 4986 // CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 4987 // CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 4988 // CHECK19-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 4989 // CHECK19-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 4990 // CHECK19-NEXT: [[CONV:%.*]] = bitcast i32* [[B_ADDR]] to i16* 4991 // CHECK19-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 4 4992 // CHECK19-NEXT: [[CONV1:%.*]] = sext i16 [[TMP0]] to i32 4993 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4 4994 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 
[[TMP1]], [[CONV1]] 4995 // CHECK19-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4 4996 // CHECK19-NEXT: ret void 4997 // 4998 // 4999 // CHECK19-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg 5000 // CHECK19-SAME: () #[[ATTR3:[0-9]+]] { 5001 // CHECK19-NEXT: entry: 5002 // CHECK19-NEXT: call void @__tgt_register_requires(i64 1) 5003 // CHECK19-NEXT: ret void 5004 // 5005 // 5006 // CHECK20-LABEL: define {{[^@]+}}@_Z3bari 5007 // CHECK20-SAME: (i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] { 5008 // CHECK20-NEXT: entry: 5009 // CHECK20-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 5010 // CHECK20-NEXT: [[A:%.*]] = alloca i32, align 4 5011 // CHECK20-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4 5012 // CHECK20-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 5013 // CHECK20-NEXT: store i32 0, i32* [[A]], align 4 5014 // CHECK20-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 5015 // CHECK20-NEXT: [[CALL:%.*]] = call i32 @_ZN2S12r1Ei(%struct.S1* nonnull dereferenceable(8) [[S]], i32 [[TMP0]]) 5016 // CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4 5017 // CHECK20-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] 5018 // CHECK20-NEXT: store i32 [[ADD]], i32* [[A]], align 4 5019 // CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 5020 // CHECK20-NEXT: [[CALL1:%.*]] = call i32 @_ZL7fstatici(i32 [[TMP2]]) 5021 // CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 5022 // CHECK20-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] 5023 // CHECK20-NEXT: store i32 [[ADD2]], i32* [[A]], align 4 5024 // CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 5025 // CHECK20-NEXT: [[CALL3:%.*]] = call i32 @_Z9ftemplateIiET_i(i32 [[TMP4]]) 5026 // CHECK20-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4 5027 // CHECK20-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] 5028 // CHECK20-NEXT: store i32 [[ADD4]], i32* [[A]], align 4 5029 // CHECK20-NEXT: [[TMP6:%.*]] = load i32, i32* [[A]], align 4 5030 // CHECK20-NEXT: ret 
i32 [[TMP6]] 5031 // 5032 // 5033 // CHECK20-LABEL: define {{[^@]+}}@_ZN2S12r1Ei 5034 // CHECK20-SAME: (%struct.S1* nonnull dereferenceable(8) [[THIS:%.*]], i32 [[N:%.*]]) #[[ATTR0]] comdat align 2 { 5035 // CHECK20-NEXT: entry: 5036 // CHECK20-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 5037 // CHECK20-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 5038 // CHECK20-NEXT: [[B:%.*]] = alloca i32, align 4 5039 // CHECK20-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 5040 // CHECK20-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4 5041 // CHECK20-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 5042 // CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 5043 // CHECK20-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 5044 // CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 5045 // CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [1 x i8*], align 4 5046 // CHECK20-NEXT: [[DOTOFFLOAD_PTRS4:%.*]] = alloca [1 x i8*], align 4 5047 // CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [1 x i8*], align 4 5048 // CHECK20-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 5049 // CHECK20-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 5050 // CHECK20-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 5051 // CHECK20-NEXT: store i32 1, i32* [[B]], align 4 5052 // CHECK20-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 5053 // CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[B]], align 4 5054 // CHECK20-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP0]], [[TMP1]] 5055 // CHECK20-NEXT: store i32 [[SUB]], i32* [[DOTCAPTURE_EXPR_]], align 4 5056 // CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[B]], align 4 5057 // CHECK20-NEXT: store i32 [[TMP2]], i32* [[B_CASTED]], align 4 5058 // CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[B_CASTED]], align 4 5059 // CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 5060 // CHECK20-NEXT: store i32 [[TMP4]], i32* 
[[DOTCAPTURE_EXPR__CASTED]], align 4 5061 // CHECK20-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 5062 // CHECK20-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0 5063 // CHECK20-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 5064 // CHECK20-NEXT: [[TMP7:%.*]] = bitcast i8** [[TMP6]] to %struct.S1** 5065 // CHECK20-NEXT: store %struct.S1* [[THIS1]], %struct.S1** [[TMP7]], align 4 5066 // CHECK20-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 5067 // CHECK20-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to double** 5068 // CHECK20-NEXT: store double* [[A]], double** [[TMP9]], align 4 5069 // CHECK20-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 5070 // CHECK20-NEXT: store i8* null, i8** [[TMP10]], align 4 5071 // CHECK20-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 5072 // CHECK20-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* 5073 // CHECK20-NEXT: store i32 [[TMP3]], i32* [[TMP12]], align 4 5074 // CHECK20-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 5075 // CHECK20-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* 5076 // CHECK20-NEXT: store i32 [[TMP3]], i32* [[TMP14]], align 4 5077 // CHECK20-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 5078 // CHECK20-NEXT: store i8* null, i8** [[TMP15]], align 4 5079 // CHECK20-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 5080 // CHECK20-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32* 5081 // CHECK20-NEXT: store i32 [[TMP5]], i32* [[TMP17]], align 4 5082 // CHECK20-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* 
[[DOTOFFLOAD_PTRS]], i32 0, i32 2 5083 // CHECK20-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* 5084 // CHECK20-NEXT: store i32 [[TMP5]], i32* [[TMP19]], align 4 5085 // CHECK20-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 5086 // CHECK20-NEXT: store i8* null, i8** [[TMP20]], align 4 5087 // CHECK20-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 5088 // CHECK20-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 5089 // CHECK20-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 5090 // CHECK20-NEXT: [[TMP24:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l121.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 [[TMP23]]) 5091 // CHECK20-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0 5092 // CHECK20-NEXT: br i1 [[TMP25]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 5093 // CHECK20: omp_offload.failed: 5094 // CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l121(%struct.S1* [[THIS1]], i32 [[TMP3]], i32 [[TMP5]]) #[[ATTR2:[0-9]+]] 5095 // CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT]] 5096 // CHECK20: omp_offload.cont: 5097 // CHECK20-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 5098 // CHECK20-NEXT: [[TMP26:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0 5099 // CHECK20-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to %struct.S1** 5100 // CHECK20-NEXT: store %struct.S1* [[THIS1]], %struct.S1** [[TMP27]], align 4 5101 // CHECK20-NEXT: [[TMP28:%.*]] = 
getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0 5102 // CHECK20-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to double** 5103 // CHECK20-NEXT: store double* [[A2]], double** [[TMP29]], align 4 5104 // CHECK20-NEXT: [[TMP30:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS5]], i32 0, i32 0 5105 // CHECK20-NEXT: store i8* null, i8** [[TMP30]], align 4 5106 // CHECK20-NEXT: [[TMP31:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0 5107 // CHECK20-NEXT: [[TMP32:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0 5108 // CHECK20-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l126.region_id, i32 1, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1024) 5109 // CHECK20-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 5110 // CHECK20-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED6:%.*]], label [[OMP_OFFLOAD_CONT7:%.*]] 5111 // CHECK20: omp_offload.failed6: 5112 // CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l126(%struct.S1* [[THIS1]]) #[[ATTR2]] 5113 // CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT7]] 5114 // CHECK20: omp_offload.cont7: 5115 // CHECK20-NEXT: [[A8:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 5116 // CHECK20-NEXT: [[TMP35:%.*]] = load double, double* [[A8]], align 4 5117 // CHECK20-NEXT: [[CONV:%.*]] = fptosi double [[TMP35]] to i32 5118 // CHECK20-NEXT: ret i32 [[CONV]] 5119 // 5120 // 5121 // CHECK20-LABEL: define {{[^@]+}}@_ZL7fstatici 5122 // CHECK20-SAME: (i32 [[N:%.*]]) #[[ATTR0]] { 5123 // CHECK20-NEXT: entry: 5124 // CHECK20-NEXT: [[N_ADDR:%.*]] = alloca i32, 
align 4 5125 // CHECK20-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 5126 // CHECK20-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 5127 // CHECK20-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 5128 // CHECK20-NEXT: [[DOTCAPTURE_EXPR__CASTED2:%.*]] = alloca i32, align 4 5129 // CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x i8*], align 4 5130 // CHECK20-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x i8*], align 4 5131 // CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x i8*], align 4 5132 // CHECK20-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4 5133 // CHECK20-NEXT: [[DOTCAPTURE_EXPR__CASTED4:%.*]] = alloca i32, align 4 5134 // CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [1 x i8*], align 4 5135 // CHECK20-NEXT: [[DOTOFFLOAD_PTRS6:%.*]] = alloca [1 x i8*], align 4 5136 // CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [1 x i8*], align 4 5137 // CHECK20-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 5138 // CHECK20-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 5139 // CHECK20-NEXT: store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4 5140 // CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 5141 // CHECK20-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP1]], 32 5142 // CHECK20-NEXT: store i32 [[MUL]], i32* [[DOTCAPTURE_EXPR_1]], align 4 5143 // CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 5144 // CHECK20-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 5145 // CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 5146 // CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 5147 // CHECK20-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR__CASTED2]], align 4 5148 // CHECK20-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED2]], align 4 5149 // CHECK20-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 5150 // CHECK20-NEXT: [[TMP7:%.*]] 
= bitcast i8** [[TMP6]] to i32* 5151 // CHECK20-NEXT: store i32 [[TMP3]], i32* [[TMP7]], align 4 5152 // CHECK20-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 5153 // CHECK20-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32* 5154 // CHECK20-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4 5155 // CHECK20-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 5156 // CHECK20-NEXT: store i8* null, i8** [[TMP10]], align 4 5157 // CHECK20-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 5158 // CHECK20-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* 5159 // CHECK20-NEXT: store i32 [[TMP5]], i32* [[TMP12]], align 4 5160 // CHECK20-NEXT: [[TMP13:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 5161 // CHECK20-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* 5162 // CHECK20-NEXT: store i32 [[TMP5]], i32* [[TMP14]], align 4 5163 // CHECK20-NEXT: [[TMP15:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 5164 // CHECK20-NEXT: store i8* null, i8** [[TMP15]], align 4 5165 // CHECK20-NEXT: [[TMP16:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 5166 // CHECK20-NEXT: [[TMP17:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 5167 // CHECK20-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 5168 // CHECK20-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 5169 // CHECK20-NEXT: [[TMP20:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l104.region_id, i32 2, i8** [[TMP16]], i8** [[TMP17]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* 
@.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 [[TMP18]], i32 [[TMP19]]) 5170 // CHECK20-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0 5171 // CHECK20-NEXT: br i1 [[TMP21]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 5172 // CHECK20: omp_offload.failed: 5173 // CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l104(i32 [[TMP3]], i32 [[TMP5]]) #[[ATTR2]] 5174 // CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT]] 5175 // CHECK20: omp_offload.cont: 5176 // CHECK20-NEXT: [[TMP22:%.*]] = load i32, i32* [[N_ADDR]], align 4 5177 // CHECK20-NEXT: [[ADD:%.*]] = add nsw i32 32, [[TMP22]] 5178 // CHECK20-NEXT: store i32 [[ADD]], i32* [[DOTCAPTURE_EXPR_3]], align 4 5179 // CHECK20-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4 5180 // CHECK20-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR__CASTED4]], align 4 5181 // CHECK20-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED4]], align 4 5182 // CHECK20-NEXT: [[TMP25:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 5183 // CHECK20-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* 5184 // CHECK20-NEXT: store i32 [[TMP24]], i32* [[TMP26]], align 4 5185 // CHECK20-NEXT: [[TMP27:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 5186 // CHECK20-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32* 5187 // CHECK20-NEXT: store i32 [[TMP24]], i32* [[TMP28]], align 4 5188 // CHECK20-NEXT: [[TMP29:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i32 0, i32 0 5189 // CHECK20-NEXT: store i8* null, i8** [[TMP29]], align 4 5190 // CHECK20-NEXT: [[TMP30:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 5191 // CHECK20-NEXT: [[TMP31:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 5192 // CHECK20-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], 
align 4 5193 // CHECK20-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l108.region_id, i32 1, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 [[TMP32]]) 5194 // CHECK20-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 5195 // CHECK20-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED8:%.*]], label [[OMP_OFFLOAD_CONT9:%.*]] 5196 // CHECK20: omp_offload.failed8: 5197 // CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l108(i32 [[TMP24]]) #[[ATTR2]] 5198 // CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT9]] 5199 // CHECK20: omp_offload.cont9: 5200 // CHECK20-NEXT: [[TMP35:%.*]] = load i32, i32* [[N_ADDR]], align 4 5201 // CHECK20-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP35]], 1 5202 // CHECK20-NEXT: ret i32 [[ADD10]] 5203 // 5204 // 5205 // CHECK20-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i 5206 // CHECK20-SAME: (i32 [[N:%.*]]) #[[ATTR0]] comdat { 5207 // CHECK20-NEXT: entry: 5208 // CHECK20-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 5209 // CHECK20-NEXT: [[A:%.*]] = alloca i32, align 4 5210 // CHECK20-NEXT: [[B:%.*]] = alloca i16, align 2 5211 // CHECK20-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i16, align 2 5212 // CHECK20-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 5213 // CHECK20-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4 5214 // CHECK20-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 5215 // CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 5216 // CHECK20-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 5217 // CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 5218 // CHECK20-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 5219 // CHECK20-NEXT: store i32 0, i32* [[A]], align 4 5220 // 
CHECK20-NEXT: [[TMP0:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l88.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 20) 5221 // CHECK20-NEXT: [[TMP1:%.*]] = icmp ne i32 [[TMP0]], 0 5222 // CHECK20-NEXT: br i1 [[TMP1]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 5223 // CHECK20: omp_offload.failed: 5224 // CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l88() #[[ATTR2]] 5225 // CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT]] 5226 // CHECK20: omp_offload.cont: 5227 // CHECK20-NEXT: store i16 1, i16* [[B]], align 2 5228 // CHECK20-NEXT: [[TMP2:%.*]] = load i16, i16* [[B]], align 2 5229 // CHECK20-NEXT: store i16 [[TMP2]], i16* [[DOTCAPTURE_EXPR_]], align 2 5230 // CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 5231 // CHECK20-NEXT: store i32 [[TMP3]], i32* [[A_CASTED]], align 4 5232 // CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[A_CASTED]], align 4 5233 // CHECK20-NEXT: [[TMP5:%.*]] = load i16, i16* [[B]], align 2 5234 // CHECK20-NEXT: [[CONV:%.*]] = bitcast i32* [[B_CASTED]] to i16* 5235 // CHECK20-NEXT: store i16 [[TMP5]], i16* [[CONV]], align 2 5236 // CHECK20-NEXT: [[TMP6:%.*]] = load i32, i32* [[B_CASTED]], align 4 5237 // CHECK20-NEXT: [[TMP7:%.*]] = load i16, i16* [[DOTCAPTURE_EXPR_]], align 2 5238 // CHECK20-NEXT: [[CONV1:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__CASTED]] to i16* 5239 // CHECK20-NEXT: store i16 [[TMP7]], i16* [[CONV1]], align 2 5240 // CHECK20-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 5241 // CHECK20-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 5242 // CHECK20-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i32* 5243 // CHECK20-NEXT: store i32 [[TMP4]], i32* [[TMP10]], align 4 5244 // CHECK20-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x 
i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 5245 // CHECK20-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* 5246 // CHECK20-NEXT: store i32 [[TMP4]], i32* [[TMP12]], align 4 5247 // CHECK20-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 5248 // CHECK20-NEXT: store i8* null, i8** [[TMP13]], align 4 5249 // CHECK20-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 5250 // CHECK20-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* 5251 // CHECK20-NEXT: store i32 [[TMP6]], i32* [[TMP15]], align 4 5252 // CHECK20-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 5253 // CHECK20-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32* 5254 // CHECK20-NEXT: store i32 [[TMP6]], i32* [[TMP17]], align 4 5255 // CHECK20-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 5256 // CHECK20-NEXT: store i8* null, i8** [[TMP18]], align 4 5257 // CHECK20-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 5258 // CHECK20-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32* 5259 // CHECK20-NEXT: store i32 [[TMP8]], i32* [[TMP20]], align 4 5260 // CHECK20-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 5261 // CHECK20-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i32* 5262 // CHECK20-NEXT: store i32 [[TMP8]], i32* [[TMP22]], align 4 5263 // CHECK20-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 5264 // CHECK20-NEXT: store i8* null, i8** [[TMP23]], align 4 5265 // CHECK20-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 5266 // CHECK20-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 5267 // 
CHECK20-NEXT: [[TMP26:%.*]] = load i16, i16* [[DOTCAPTURE_EXPR_]], align 2 5268 // CHECK20-NEXT: [[TMP27:%.*]] = sext i16 [[TMP26]] to i32 5269 // CHECK20-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l93.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 [[TMP27]], i32 1024) 5270 // CHECK20-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 5271 // CHECK20-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED2:%.*]], label [[OMP_OFFLOAD_CONT3:%.*]] 5272 // CHECK20: omp_offload.failed2: 5273 // CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l93(i32 [[TMP4]], i32 [[TMP6]], i32 [[TMP8]]) #[[ATTR2]] 5274 // CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT3]] 5275 // CHECK20: omp_offload.cont3: 5276 // CHECK20-NEXT: [[TMP30:%.*]] = load i32, i32* [[A]], align 4 5277 // CHECK20-NEXT: ret i32 [[TMP30]] 5278 // 5279 // 5280 // CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l121 5281 // CHECK20-SAME: (%struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1:[0-9]+]] { 5282 // CHECK20-NEXT: entry: 5283 // CHECK20-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 5284 // CHECK20-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 5285 // CHECK20-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 5286 // CHECK20-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4 5287 // CHECK20-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 5288 // CHECK20-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 5289 // CHECK20-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 5290 // CHECK20-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* 
[[DOTCAPTURE_EXPR__ADDR]], align 4 5291 // CHECK20-NEXT: [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 5292 // CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 5293 // CHECK20-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 [[TMP2]]) 5294 // CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[B_ADDR]], align 4 5295 // CHECK20-NEXT: store i32 [[TMP3]], i32* [[B_CASTED]], align 4 5296 // CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_CASTED]], align 4 5297 // CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]], i32 [[TMP4]]) 5298 // CHECK20-NEXT: ret void 5299 // 5300 // 5301 // CHECK20-LABEL: define {{[^@]+}}@.omp_outlined. 5302 // CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i32 [[B:%.*]]) #[[ATTR1]] { 5303 // CHECK20-NEXT: entry: 5304 // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 5305 // CHECK20-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 5306 // CHECK20-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 5307 // CHECK20-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 5308 // CHECK20-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 5309 // CHECK20-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 5310 // CHECK20-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 5311 // CHECK20-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 5312 // CHECK20-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 5313 // CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[B_ADDR]], align 4 5314 // CHECK20-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP1]] to double 5315 // CHECK20-NEXT: [[ADD:%.*]] = fadd 
double [[CONV]], 1.500000e+00 5316 // CHECK20-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0 5317 // CHECK20-NEXT: store double [[ADD]], double* [[A]], align 4 5318 // CHECK20-NEXT: ret void 5319 // 5320 // 5321 // CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l126 5322 // CHECK20-SAME: (%struct.S1* [[THIS:%.*]]) #[[ATTR1]] { 5323 // CHECK20-NEXT: entry: 5324 // CHECK20-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 5325 // CHECK20-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 5326 // CHECK20-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 5327 // CHECK20-NEXT: [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 5328 // CHECK20-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 1024) 5329 // CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]]) 5330 // CHECK20-NEXT: ret void 5331 // 5332 // 5333 // CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..1 5334 // CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]]) #[[ATTR1]] { 5335 // CHECK20-NEXT: entry: 5336 // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 5337 // CHECK20-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 5338 // CHECK20-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 5339 // CHECK20-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 5340 // CHECK20-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 5341 // CHECK20-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 5342 // CHECK20-NEXT: [[TMP0:%.*]] = load %struct.S1*, 
%struct.S1** [[THIS_ADDR]], align 4 5343 // CHECK20-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0 5344 // CHECK20-NEXT: store double 2.500000e+00, double* [[A]], align 4 5345 // CHECK20-NEXT: ret void 5346 // 5347 // 5348 // CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l104 5349 // CHECK20-SAME: (i32 [[DOTCAPTURE_EXPR_:%.*]], i32 [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR1]] { 5350 // CHECK20-NEXT: entry: 5351 // CHECK20-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 5352 // CHECK20-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca i32, align 4 5353 // CHECK20-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 5354 // CHECK20-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 5355 // CHECK20-NEXT: store i32 [[DOTCAPTURE_EXPR_1]], i32* [[DOTCAPTURE_EXPR__ADDR2]], align 4 5356 // CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 5357 // CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR2]], align 4 5358 // CHECK20-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]]) 5359 // CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*)) 5360 // CHECK20-NEXT: ret void 5361 // 5362 // 5363 // CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..4 5364 // CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 5365 // CHECK20-NEXT: entry: 5366 // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 5367 // CHECK20-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 5368 // CHECK20-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 5369 // CHECK20-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 5370 // CHECK20-NEXT: ret void 5371 // 5372 // 5373 // CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l108 5374 // CHECK20-SAME: (i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] { 5375 // CHECK20-NEXT: entry: 5376 // CHECK20-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 5377 // CHECK20-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 5378 // CHECK20-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 5379 // CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 5380 // CHECK20-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 [[TMP1]]) 5381 // CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..7 to void (i32*, i32*, ...)*)) 5382 // CHECK20-NEXT: ret void 5383 // 5384 // 5385 // CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..7 5386 // CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 5387 // CHECK20-NEXT: entry: 5388 // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 5389 // CHECK20-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 5390 // CHECK20-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 5391 // CHECK20-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 5392 // CHECK20-NEXT: ret void 5393 // 5394 // 5395 // CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l88 5396 // CHECK20-SAME: () #[[ATTR1]] { 5397 // CHECK20-NEXT: entry: 5398 // CHECK20-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 5399 // CHECK20-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 20) 5400 // CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*)) 5401 // CHECK20-NEXT: ret void 5402 // 5403 // 5404 // CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..10 5405 // CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 5406 // CHECK20-NEXT: entry: 5407 // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 5408 // CHECK20-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 5409 // CHECK20-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 5410 // CHECK20-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 5411 // CHECK20-NEXT: ret void 5412 // 5413 // 5414 // CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l93 5415 // CHECK20-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] { 5416 // CHECK20-NEXT: entry: 5417 // CHECK20-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 5418 // CHECK20-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 5419 // CHECK20-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 5420 // CHECK20-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 5421 // CHECK20-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4 5422 // CHECK20-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 5423 // CHECK20-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 5424 // CHECK20-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 5425 // CHECK20-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 5426 // CHECK20-NEXT: [[CONV:%.*]] = bitcast i32* [[B_ADDR]] to i16* 5427 // CHECK20-NEXT: [[CONV1:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__ADDR]] to i16* 5428 // CHECK20-NEXT: [[TMP1:%.*]] = load i16, i16* [[CONV1]], align 4 5429 // CHECK20-NEXT: [[TMP2:%.*]] = sext i16 [[TMP1]] to i32 5430 // CHECK20-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* 
@[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 1024) 5431 // CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[A_ADDR]], align 4 5432 // CHECK20-NEXT: store i32 [[TMP3]], i32* [[A_CASTED]], align 4 5433 // CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[A_CASTED]], align 4 5434 // CHECK20-NEXT: [[TMP5:%.*]] = load i16, i16* [[CONV]], align 4 5435 // CHECK20-NEXT: [[CONV2:%.*]] = bitcast i32* [[B_CASTED]] to i16* 5436 // CHECK20-NEXT: store i16 [[TMP5]], i16* [[CONV2]], align 2 5437 // CHECK20-NEXT: [[TMP6:%.*]] = load i32, i32* [[B_CASTED]], align 4 5438 // CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP4]], i32 [[TMP6]]) 5439 // CHECK20-NEXT: ret void 5440 // 5441 // 5442 // CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..11 5443 // CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[B:%.*]]) #[[ATTR1]] { 5444 // CHECK20-NEXT: entry: 5445 // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 5446 // CHECK20-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 5447 // CHECK20-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 5448 // CHECK20-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 5449 // CHECK20-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 5450 // CHECK20-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 5451 // CHECK20-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 5452 // CHECK20-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 5453 // CHECK20-NEXT: [[CONV:%.*]] = bitcast i32* [[B_ADDR]] to i16* 5454 // CHECK20-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 4 5455 // CHECK20-NEXT: [[CONV1:%.*]] = sext i16 [[TMP0]] to i32 5456 // CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4 5457 // CHECK20-NEXT: [[ADD:%.*]] = add nsw i32 
[[TMP1]], [[CONV1]] 5458 // CHECK20-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4 5459 // CHECK20-NEXT: ret void 5460 // 5461 // 5462 // CHECK20-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg 5463 // CHECK20-SAME: () #[[ATTR3:[0-9]+]] { 5464 // CHECK20-NEXT: entry: 5465 // CHECK20-NEXT: call void @__tgt_register_requires(i64 1) 5466 // CHECK20-NEXT: ret void 5467 // 5468 // 5469 // CHECK21-LABEL: define {{[^@]+}}@_Z3bari 5470 // CHECK21-SAME: (i32 signext [[N:%.*]]) #[[ATTR0:[0-9]+]] { 5471 // CHECK21-NEXT: entry: 5472 // CHECK21-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 5473 // CHECK21-NEXT: [[A:%.*]] = alloca i32, align 4 5474 // CHECK21-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8 5475 // CHECK21-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 5476 // CHECK21-NEXT: store i32 0, i32* [[A]], align 4 5477 // CHECK21-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 5478 // CHECK21-NEXT: [[CALL:%.*]] = call signext i32 @_ZN2S12r1Ei(%struct.S1* nonnull dereferenceable(8) [[S]], i32 signext [[TMP0]]) 5479 // CHECK21-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4 5480 // CHECK21-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] 5481 // CHECK21-NEXT: store i32 [[ADD]], i32* [[A]], align 4 5482 // CHECK21-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 5483 // CHECK21-NEXT: [[CALL1:%.*]] = call signext i32 @_ZL7fstatici(i32 signext [[TMP2]]) 5484 // CHECK21-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 5485 // CHECK21-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] 5486 // CHECK21-NEXT: store i32 [[ADD2]], i32* [[A]], align 4 5487 // CHECK21-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 5488 // CHECK21-NEXT: [[CALL3:%.*]] = call signext i32 @_Z9ftemplateIiET_i(i32 signext [[TMP4]]) 5489 // CHECK21-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4 5490 // CHECK21-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] 5491 // CHECK21-NEXT: store i32 [[ADD4]], i32* [[A]], align 4 5492 // CHECK21-NEXT: [[TMP6:%.*]] = 
load i32, i32* [[A]], align 4 5493 // CHECK21-NEXT: ret i32 [[TMP6]] 5494 // 5495 // 5496 // CHECK21-LABEL: define {{[^@]+}}@_ZN2S12r1Ei 5497 // CHECK21-SAME: (%struct.S1* nonnull dereferenceable(8) [[THIS:%.*]], i32 signext [[N:%.*]]) #[[ATTR0]] comdat align 2 { 5498 // CHECK21-NEXT: entry: 5499 // CHECK21-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 5500 // CHECK21-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 5501 // CHECK21-NEXT: [[B:%.*]] = alloca i32, align 4 5502 // CHECK21-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 5503 // CHECK21-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 5504 // CHECK21-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 5505 // CHECK21-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 5506 // CHECK21-NEXT: store i32 1, i32* [[B]], align 4 5507 // CHECK21-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 5508 // CHECK21-NEXT: [[TMP1:%.*]] = load i32, i32* [[B]], align 4 5509 // CHECK21-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP0]], [[TMP1]] 5510 // CHECK21-NEXT: store i32 [[SUB]], i32* [[DOTCAPTURE_EXPR_]], align 4 5511 // CHECK21-NEXT: [[TMP2:%.*]] = load i32, i32* [[B]], align 4 5512 // CHECK21-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP2]] to double 5513 // CHECK21-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 5514 // CHECK21-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0 5515 // CHECK21-NEXT: store double [[ADD]], double* [[A]], align 8 5516 // CHECK21-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 5517 // CHECK21-NEXT: store double 2.500000e+00, double* [[A2]], align 8 5518 // CHECK21-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 5519 // CHECK21-NEXT: [[TMP3:%.*]] = load double, double* [[A3]], align 8 5520 // CHECK21-NEXT: [[CONV4:%.*]] = fptosi double [[TMP3]] to i32 5521 // CHECK21-NEXT: ret i32 [[CONV4]] 5522 // 5523 // 
5524 // CHECK21-LABEL: define {{[^@]+}}@_ZL7fstatici 5525 // CHECK21-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] { 5526 // CHECK21-NEXT: entry: 5527 // CHECK21-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 5528 // CHECK21-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 5529 // CHECK21-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 5530 // CHECK21-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4 5531 // CHECK21-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 5532 // CHECK21-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 5533 // CHECK21-NEXT: store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4 5534 // CHECK21-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 5535 // CHECK21-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP1]], 32 5536 // CHECK21-NEXT: store i32 [[MUL]], i32* [[DOTCAPTURE_EXPR_1]], align 4 5537 // CHECK21-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 5538 // CHECK21-NEXT: [[ADD:%.*]] = add nsw i32 32, [[TMP2]] 5539 // CHECK21-NEXT: store i32 [[ADD]], i32* [[DOTCAPTURE_EXPR_2]], align 4 5540 // CHECK21-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4 5541 // CHECK21-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP3]], 1 5542 // CHECK21-NEXT: ret i32 [[ADD3]] 5543 // 5544 // 5545 // CHECK21-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i 5546 // CHECK21-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] comdat { 5547 // CHECK21-NEXT: entry: 5548 // CHECK21-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 5549 // CHECK21-NEXT: [[A:%.*]] = alloca i32, align 4 5550 // CHECK21-NEXT: [[B:%.*]] = alloca i16, align 2 5551 // CHECK21-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i16, align 2 5552 // CHECK21-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 5553 // CHECK21-NEXT: store i32 0, i32* [[A]], align 4 5554 // CHECK21-NEXT: store i16 1, i16* [[B]], align 2 5555 // CHECK21-NEXT: [[TMP0:%.*]] = load i16, i16* [[B]], align 2 5556 // CHECK21-NEXT: store i16 [[TMP0]], i16* [[DOTCAPTURE_EXPR_]], align 2 5557 // CHECK21-NEXT: [[TMP1:%.*]] = load i16, i16* [[B]], 
align 2 5558 // CHECK21-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32 5559 // CHECK21-NEXT: [[TMP2:%.*]] = load i32, i32* [[A]], align 4 5560 // CHECK21-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], [[CONV]] 5561 // CHECK21-NEXT: store i32 [[ADD]], i32* [[A]], align 4 5562 // CHECK21-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 5563 // CHECK21-NEXT: ret i32 [[TMP3]] 5564 // 5565 // 5566 // CHECK22-LABEL: define {{[^@]+}}@_Z3bari 5567 // CHECK22-SAME: (i32 signext [[N:%.*]]) #[[ATTR0:[0-9]+]] { 5568 // CHECK22-NEXT: entry: 5569 // CHECK22-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 5570 // CHECK22-NEXT: [[A:%.*]] = alloca i32, align 4 5571 // CHECK22-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8 5572 // CHECK22-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 5573 // CHECK22-NEXT: store i32 0, i32* [[A]], align 4 5574 // CHECK22-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 5575 // CHECK22-NEXT: [[CALL:%.*]] = call signext i32 @_ZN2S12r1Ei(%struct.S1* nonnull dereferenceable(8) [[S]], i32 signext [[TMP0]]) 5576 // CHECK22-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4 5577 // CHECK22-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] 5578 // CHECK22-NEXT: store i32 [[ADD]], i32* [[A]], align 4 5579 // CHECK22-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 5580 // CHECK22-NEXT: [[CALL1:%.*]] = call signext i32 @_ZL7fstatici(i32 signext [[TMP2]]) 5581 // CHECK22-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 5582 // CHECK22-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] 5583 // CHECK22-NEXT: store i32 [[ADD2]], i32* [[A]], align 4 5584 // CHECK22-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 5585 // CHECK22-NEXT: [[CALL3:%.*]] = call signext i32 @_Z9ftemplateIiET_i(i32 signext [[TMP4]]) 5586 // CHECK22-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4 5587 // CHECK22-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] 5588 // CHECK22-NEXT: store i32 [[ADD4]], i32* [[A]], align 4 5589 // CHECK22-NEXT: [[TMP6:%.*]] = 
load i32, i32* [[A]], align 4 5590 // CHECK22-NEXT: ret i32 [[TMP6]] 5591 // 5592 // 5593 // CHECK22-LABEL: define {{[^@]+}}@_ZN2S12r1Ei 5594 // CHECK22-SAME: (%struct.S1* nonnull dereferenceable(8) [[THIS:%.*]], i32 signext [[N:%.*]]) #[[ATTR0]] comdat align 2 { 5595 // CHECK22-NEXT: entry: 5596 // CHECK22-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 5597 // CHECK22-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 5598 // CHECK22-NEXT: [[B:%.*]] = alloca i32, align 4 5599 // CHECK22-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 5600 // CHECK22-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 5601 // CHECK22-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 5602 // CHECK22-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 5603 // CHECK22-NEXT: store i32 1, i32* [[B]], align 4 5604 // CHECK22-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 5605 // CHECK22-NEXT: [[TMP1:%.*]] = load i32, i32* [[B]], align 4 5606 // CHECK22-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP0]], [[TMP1]] 5607 // CHECK22-NEXT: store i32 [[SUB]], i32* [[DOTCAPTURE_EXPR_]], align 4 5608 // CHECK22-NEXT: [[TMP2:%.*]] = load i32, i32* [[B]], align 4 5609 // CHECK22-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP2]] to double 5610 // CHECK22-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 5611 // CHECK22-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0 5612 // CHECK22-NEXT: store double [[ADD]], double* [[A]], align 8 5613 // CHECK22-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 5614 // CHECK22-NEXT: store double 2.500000e+00, double* [[A2]], align 8 5615 // CHECK22-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 5616 // CHECK22-NEXT: [[TMP3:%.*]] = load double, double* [[A3]], align 8 5617 // CHECK22-NEXT: [[CONV4:%.*]] = fptosi double [[TMP3]] to i32 5618 // CHECK22-NEXT: ret i32 [[CONV4]] 5619 // 5620 // 
5621 // CHECK22-LABEL: define {{[^@]+}}@_ZL7fstatici 5622 // CHECK22-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] { 5623 // CHECK22-NEXT: entry: 5624 // CHECK22-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 5625 // CHECK22-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 5626 // CHECK22-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 5627 // CHECK22-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4 5628 // CHECK22-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 5629 // CHECK22-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 5630 // CHECK22-NEXT: store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4 5631 // CHECK22-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 5632 // CHECK22-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP1]], 32 5633 // CHECK22-NEXT: store i32 [[MUL]], i32* [[DOTCAPTURE_EXPR_1]], align 4 5634 // CHECK22-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 5635 // CHECK22-NEXT: [[ADD:%.*]] = add nsw i32 32, [[TMP2]] 5636 // CHECK22-NEXT: store i32 [[ADD]], i32* [[DOTCAPTURE_EXPR_2]], align 4 5637 // CHECK22-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4 5638 // CHECK22-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP3]], 1 5639 // CHECK22-NEXT: ret i32 [[ADD3]] 5640 // 5641 // 5642 // CHECK22-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i 5643 // CHECK22-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] comdat { 5644 // CHECK22-NEXT: entry: 5645 // CHECK22-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 5646 // CHECK22-NEXT: [[A:%.*]] = alloca i32, align 4 5647 // CHECK22-NEXT: [[B:%.*]] = alloca i16, align 2 5648 // CHECK22-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i16, align 2 5649 // CHECK22-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 5650 // CHECK22-NEXT: store i32 0, i32* [[A]], align 4 5651 // CHECK22-NEXT: store i16 1, i16* [[B]], align 2 5652 // CHECK22-NEXT: [[TMP0:%.*]] = load i16, i16* [[B]], align 2 5653 // CHECK22-NEXT: store i16 [[TMP0]], i16* [[DOTCAPTURE_EXPR_]], align 2 5654 // CHECK22-NEXT: [[TMP1:%.*]] = load i16, i16* [[B]], 
align 2 5655 // CHECK22-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32 5656 // CHECK22-NEXT: [[TMP2:%.*]] = load i32, i32* [[A]], align 4 5657 // CHECK22-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], [[CONV]] 5658 // CHECK22-NEXT: store i32 [[ADD]], i32* [[A]], align 4 5659 // CHECK22-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 5660 // CHECK22-NEXT: ret i32 [[TMP3]] 5661 // 5662 // 5663 // CHECK23-LABEL: define {{[^@]+}}@_Z3bari 5664 // CHECK23-SAME: (i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] { 5665 // CHECK23-NEXT: entry: 5666 // CHECK23-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 5667 // CHECK23-NEXT: [[A:%.*]] = alloca i32, align 4 5668 // CHECK23-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4 5669 // CHECK23-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 5670 // CHECK23-NEXT: store i32 0, i32* [[A]], align 4 5671 // CHECK23-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 5672 // CHECK23-NEXT: [[CALL:%.*]] = call i32 @_ZN2S12r1Ei(%struct.S1* nonnull dereferenceable(8) [[S]], i32 [[TMP0]]) 5673 // CHECK23-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4 5674 // CHECK23-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] 5675 // CHECK23-NEXT: store i32 [[ADD]], i32* [[A]], align 4 5676 // CHECK23-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 5677 // CHECK23-NEXT: [[CALL1:%.*]] = call i32 @_ZL7fstatici(i32 [[TMP2]]) 5678 // CHECK23-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 5679 // CHECK23-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] 5680 // CHECK23-NEXT: store i32 [[ADD2]], i32* [[A]], align 4 5681 // CHECK23-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 5682 // CHECK23-NEXT: [[CALL3:%.*]] = call i32 @_Z9ftemplateIiET_i(i32 [[TMP4]]) 5683 // CHECK23-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4 5684 // CHECK23-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] 5685 // CHECK23-NEXT: store i32 [[ADD4]], i32* [[A]], align 4 5686 // CHECK23-NEXT: [[TMP6:%.*]] = load i32, i32* [[A]], align 4 5687 // CHECK23-NEXT: ret 
i32 [[TMP6]] 5688 // 5689 // 5690 // CHECK23-LABEL: define {{[^@]+}}@_ZN2S12r1Ei 5691 // CHECK23-SAME: (%struct.S1* nonnull dereferenceable(8) [[THIS:%.*]], i32 [[N:%.*]]) #[[ATTR0]] comdat align 2 { 5692 // CHECK23-NEXT: entry: 5693 // CHECK23-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 5694 // CHECK23-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 5695 // CHECK23-NEXT: [[B:%.*]] = alloca i32, align 4 5696 // CHECK23-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 5697 // CHECK23-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 5698 // CHECK23-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 5699 // CHECK23-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 5700 // CHECK23-NEXT: store i32 1, i32* [[B]], align 4 5701 // CHECK23-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 5702 // CHECK23-NEXT: [[TMP1:%.*]] = load i32, i32* [[B]], align 4 5703 // CHECK23-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP0]], [[TMP1]] 5704 // CHECK23-NEXT: store i32 [[SUB]], i32* [[DOTCAPTURE_EXPR_]], align 4 5705 // CHECK23-NEXT: [[TMP2:%.*]] = load i32, i32* [[B]], align 4 5706 // CHECK23-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP2]] to double 5707 // CHECK23-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 5708 // CHECK23-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0 5709 // CHECK23-NEXT: store double [[ADD]], double* [[A]], align 4 5710 // CHECK23-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 5711 // CHECK23-NEXT: store double 2.500000e+00, double* [[A2]], align 4 5712 // CHECK23-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 5713 // CHECK23-NEXT: [[TMP3:%.*]] = load double, double* [[A3]], align 4 5714 // CHECK23-NEXT: [[CONV4:%.*]] = fptosi double [[TMP3]] to i32 5715 // CHECK23-NEXT: ret i32 [[CONV4]] 5716 // 5717 // 5718 // CHECK23-LABEL: define {{[^@]+}}@_ZL7fstatici 5719 // 
CHECK23-SAME: (i32 [[N:%.*]]) #[[ATTR0]] { 5720 // CHECK23-NEXT: entry: 5721 // CHECK23-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 5722 // CHECK23-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 5723 // CHECK23-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 5724 // CHECK23-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4 5725 // CHECK23-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 5726 // CHECK23-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 5727 // CHECK23-NEXT: store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4 5728 // CHECK23-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 5729 // CHECK23-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP1]], 32 5730 // CHECK23-NEXT: store i32 [[MUL]], i32* [[DOTCAPTURE_EXPR_1]], align 4 5731 // CHECK23-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 5732 // CHECK23-NEXT: [[ADD:%.*]] = add nsw i32 32, [[TMP2]] 5733 // CHECK23-NEXT: store i32 [[ADD]], i32* [[DOTCAPTURE_EXPR_2]], align 4 5734 // CHECK23-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4 5735 // CHECK23-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP3]], 1 5736 // CHECK23-NEXT: ret i32 [[ADD3]] 5737 // 5738 // 5739 // CHECK23-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i 5740 // CHECK23-SAME: (i32 [[N:%.*]]) #[[ATTR0]] comdat { 5741 // CHECK23-NEXT: entry: 5742 // CHECK23-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 5743 // CHECK23-NEXT: [[A:%.*]] = alloca i32, align 4 5744 // CHECK23-NEXT: [[B:%.*]] = alloca i16, align 2 5745 // CHECK23-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i16, align 2 5746 // CHECK23-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 5747 // CHECK23-NEXT: store i32 0, i32* [[A]], align 4 5748 // CHECK23-NEXT: store i16 1, i16* [[B]], align 2 5749 // CHECK23-NEXT: [[TMP0:%.*]] = load i16, i16* [[B]], align 2 5750 // CHECK23-NEXT: store i16 [[TMP0]], i16* [[DOTCAPTURE_EXPR_]], align 2 5751 // CHECK23-NEXT: [[TMP1:%.*]] = load i16, i16* [[B]], align 2 5752 // CHECK23-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32 5753 // 
CHECK23-NEXT: [[TMP2:%.*]] = load i32, i32* [[A]], align 4 5754 // CHECK23-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], [[CONV]] 5755 // CHECK23-NEXT: store i32 [[ADD]], i32* [[A]], align 4 5756 // CHECK23-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 5757 // CHECK23-NEXT: ret i32 [[TMP3]] 5758 // 5759 // 5760 // CHECK24-LABEL: define {{[^@]+}}@_Z3bari 5761 // CHECK24-SAME: (i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] { 5762 // CHECK24-NEXT: entry: 5763 // CHECK24-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 5764 // CHECK24-NEXT: [[A:%.*]] = alloca i32, align 4 5765 // CHECK24-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4 5766 // CHECK24-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 5767 // CHECK24-NEXT: store i32 0, i32* [[A]], align 4 5768 // CHECK24-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 5769 // CHECK24-NEXT: [[CALL:%.*]] = call i32 @_ZN2S12r1Ei(%struct.S1* nonnull dereferenceable(8) [[S]], i32 [[TMP0]]) 5770 // CHECK24-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4 5771 // CHECK24-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] 5772 // CHECK24-NEXT: store i32 [[ADD]], i32* [[A]], align 4 5773 // CHECK24-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 5774 // CHECK24-NEXT: [[CALL1:%.*]] = call i32 @_ZL7fstatici(i32 [[TMP2]]) 5775 // CHECK24-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 5776 // CHECK24-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] 5777 // CHECK24-NEXT: store i32 [[ADD2]], i32* [[A]], align 4 5778 // CHECK24-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 5779 // CHECK24-NEXT: [[CALL3:%.*]] = call i32 @_Z9ftemplateIiET_i(i32 [[TMP4]]) 5780 // CHECK24-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4 5781 // CHECK24-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] 5782 // CHECK24-NEXT: store i32 [[ADD4]], i32* [[A]], align 4 5783 // CHECK24-NEXT: [[TMP6:%.*]] = load i32, i32* [[A]], align 4 5784 // CHECK24-NEXT: ret i32 [[TMP6]] 5785 // 5786 // 5787 // CHECK24-LABEL: define {{[^@]+}}@_ZN2S12r1Ei 
5788 // CHECK24-SAME: (%struct.S1* nonnull dereferenceable(8) [[THIS:%.*]], i32 [[N:%.*]]) #[[ATTR0]] comdat align 2 { 5789 // CHECK24-NEXT: entry: 5790 // CHECK24-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 5791 // CHECK24-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 5792 // CHECK24-NEXT: [[B:%.*]] = alloca i32, align 4 5793 // CHECK24-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 5794 // CHECK24-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 5795 // CHECK24-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 5796 // CHECK24-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 5797 // CHECK24-NEXT: store i32 1, i32* [[B]], align 4 5798 // CHECK24-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 5799 // CHECK24-NEXT: [[TMP1:%.*]] = load i32, i32* [[B]], align 4 5800 // CHECK24-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP0]], [[TMP1]] 5801 // CHECK24-NEXT: store i32 [[SUB]], i32* [[DOTCAPTURE_EXPR_]], align 4 5802 // CHECK24-NEXT: [[TMP2:%.*]] = load i32, i32* [[B]], align 4 5803 // CHECK24-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP2]] to double 5804 // CHECK24-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 5805 // CHECK24-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0 5806 // CHECK24-NEXT: store double [[ADD]], double* [[A]], align 4 5807 // CHECK24-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 5808 // CHECK24-NEXT: store double 2.500000e+00, double* [[A2]], align 4 5809 // CHECK24-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 5810 // CHECK24-NEXT: [[TMP3:%.*]] = load double, double* [[A3]], align 4 5811 // CHECK24-NEXT: [[CONV4:%.*]] = fptosi double [[TMP3]] to i32 5812 // CHECK24-NEXT: ret i32 [[CONV4]] 5813 // 5814 // 5815 // CHECK24-LABEL: define {{[^@]+}}@_ZL7fstatici 5816 // CHECK24-SAME: (i32 [[N:%.*]]) #[[ATTR0]] { 5817 // CHECK24-NEXT: entry: 5818 // 
CHECK24-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 5819 // CHECK24-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 5820 // CHECK24-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 5821 // CHECK24-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4 5822 // CHECK24-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 5823 // CHECK24-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 5824 // CHECK24-NEXT: store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4 5825 // CHECK24-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 5826 // CHECK24-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP1]], 32 5827 // CHECK24-NEXT: store i32 [[MUL]], i32* [[DOTCAPTURE_EXPR_1]], align 4 5828 // CHECK24-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 5829 // CHECK24-NEXT: [[ADD:%.*]] = add nsw i32 32, [[TMP2]] 5830 // CHECK24-NEXT: store i32 [[ADD]], i32* [[DOTCAPTURE_EXPR_2]], align 4 5831 // CHECK24-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4 5832 // CHECK24-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP3]], 1 5833 // CHECK24-NEXT: ret i32 [[ADD3]] 5834 // 5835 // 5836 // CHECK24-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i 5837 // CHECK24-SAME: (i32 [[N:%.*]]) #[[ATTR0]] comdat { 5838 // CHECK24-NEXT: entry: 5839 // CHECK24-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 5840 // CHECK24-NEXT: [[A:%.*]] = alloca i32, align 4 5841 // CHECK24-NEXT: [[B:%.*]] = alloca i16, align 2 5842 // CHECK24-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i16, align 2 5843 // CHECK24-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 5844 // CHECK24-NEXT: store i32 0, i32* [[A]], align 4 5845 // CHECK24-NEXT: store i16 1, i16* [[B]], align 2 5846 // CHECK24-NEXT: [[TMP0:%.*]] = load i16, i16* [[B]], align 2 5847 // CHECK24-NEXT: store i16 [[TMP0]], i16* [[DOTCAPTURE_EXPR_]], align 2 5848 // CHECK24-NEXT: [[TMP1:%.*]] = load i16, i16* [[B]], align 2 5849 // CHECK24-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32 5850 // CHECK24-NEXT: [[TMP2:%.*]] = load i32, i32* [[A]], align 4 5851 // CHECK24-NEXT: 
[[ADD:%.*]] = add nsw i32 [[TMP2]], [[CONV]] 5852 // CHECK24-NEXT: store i32 [[ADD]], i32* [[A]], align 4 5853 // CHECK24-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 5854 // CHECK24-NEXT: ret i32 [[TMP3]] 5855 // 5856 // 5857 // CHECK25-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l104 5858 // CHECK25-SAME: (i64 [[DOTCAPTURE_EXPR_:%.*]], i64 [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR0:[0-9]+]] { 5859 // CHECK25-NEXT: entry: 5860 // CHECK25-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 5861 // CHECK25-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca i64, align 8 5862 // CHECK25-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]]) 5863 // CHECK25-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 5864 // CHECK25-NEXT: store i64 [[DOTCAPTURE_EXPR_1]], i64* [[DOTCAPTURE_EXPR__ADDR2]], align 8 5865 // CHECK25-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32* 5866 // CHECK25-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR2]] to i32* 5867 // CHECK25-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8 5868 // CHECK25-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV3]], align 8 5869 // CHECK25-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]]) 5870 // CHECK25-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*)) 5871 // CHECK25-NEXT: ret void 5872 // 5873 // 5874 // CHECK25-LABEL: define {{[^@]+}}@.omp_outlined. 
5875 // CHECK25-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] { 5876 // CHECK25-NEXT: entry: 5877 // CHECK25-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 5878 // CHECK25-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 5879 // CHECK25-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 5880 // CHECK25-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 5881 // CHECK25-NEXT: ret void 5882 // 5883 // 5884 // CHECK25-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l108 5885 // CHECK25-SAME: (i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] { 5886 // CHECK25-NEXT: entry: 5887 // CHECK25-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 5888 // CHECK25-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 5889 // CHECK25-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 5890 // CHECK25-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32* 5891 // CHECK25-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8 5892 // CHECK25-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 [[TMP1]]) 5893 // CHECK25-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*)) 5894 // CHECK25-NEXT: ret void 5895 // 5896 // 5897 // CHECK25-LABEL: define {{[^@]+}}@.omp_outlined..1 5898 // CHECK25-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] { 5899 // CHECK25-NEXT: entry: 5900 // CHECK25-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 5901 // CHECK25-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 5902 // CHECK25-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 5903 // CHECK25-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 5904 // CHECK25-NEXT: ret void 5905 // 5906 // 5907 // CHECK25-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l121 5908 // CHECK25-SAME: (%struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] { 5909 // CHECK25-NEXT: entry: 5910 // CHECK25-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 5911 // CHECK25-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8 5912 // CHECK25-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 5913 // CHECK25-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8 5914 // CHECK25-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 5915 // CHECK25-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 5916 // CHECK25-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8 5917 // CHECK25-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 5918 // CHECK25-NEXT: [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 5919 // CHECK25-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32* 5920 // CHECK25-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32* 5921 // CHECK25-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV1]], align 8 5922 // CHECK25-NEXT: call void 
@__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 [[TMP2]]) 5923 // CHECK25-NEXT: [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8 5924 // CHECK25-NEXT: [[CONV2:%.*]] = bitcast i64* [[B_CASTED]] to i32* 5925 // CHECK25-NEXT: store i32 [[TMP3]], i32* [[CONV2]], align 4 5926 // CHECK25-NEXT: [[TMP4:%.*]] = load i64, i64* [[B_CASTED]], align 8 5927 // CHECK25-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64)* @.omp_outlined..2 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]], i64 [[TMP4]]) 5928 // CHECK25-NEXT: ret void 5929 // 5930 // 5931 // CHECK25-LABEL: define {{[^@]+}}@.omp_outlined..2 5932 // CHECK25-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i64 [[B:%.*]]) #[[ATTR0]] { 5933 // CHECK25-NEXT: entry: 5934 // CHECK25-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 5935 // CHECK25-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 5936 // CHECK25-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 5937 // CHECK25-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8 5938 // CHECK25-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 5939 // CHECK25-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 5940 // CHECK25-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 5941 // CHECK25-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8 5942 // CHECK25-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 5943 // CHECK25-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32* 5944 // CHECK25-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8 5945 // CHECK25-NEXT: [[CONV1:%.*]] = sitofp i32 [[TMP1]] to double 5946 // CHECK25-NEXT: [[ADD:%.*]] = fadd double [[CONV1]], 1.500000e+00 5947 // CHECK25-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], 
%struct.S1* [[TMP0]], i32 0, i32 0 5948 // CHECK25-NEXT: store double [[ADD]], double* [[A]], align 8 5949 // CHECK25-NEXT: ret void 5950 // 5951 // 5952 // CHECK25-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l126 5953 // CHECK25-SAME: (%struct.S1* [[THIS:%.*]]) #[[ATTR0]] { 5954 // CHECK25-NEXT: entry: 5955 // CHECK25-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 5956 // CHECK25-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 5957 // CHECK25-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 5958 // CHECK25-NEXT: [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 5959 // CHECK25-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 1024) 5960 // CHECK25-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]]) 5961 // CHECK25-NEXT: ret void 5962 // 5963 // 5964 // CHECK25-LABEL: define {{[^@]+}}@.omp_outlined..3 5965 // CHECK25-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]]) #[[ATTR0]] { 5966 // CHECK25-NEXT: entry: 5967 // CHECK25-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 5968 // CHECK25-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 5969 // CHECK25-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 5970 // CHECK25-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 5971 // CHECK25-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 5972 // CHECK25-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 5973 // CHECK25-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 5974 // CHECK25-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], 
%struct.S1* [[TMP0]], i32 0, i32 0 5975 // CHECK25-NEXT: store double 2.500000e+00, double* [[A]], align 8 5976 // CHECK25-NEXT: ret void 5977 // 5978 // 5979 // CHECK25-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l88 5980 // CHECK25-SAME: () #[[ATTR0]] { 5981 // CHECK25-NEXT: entry: 5982 // CHECK25-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 5983 // CHECK25-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 20) 5984 // CHECK25-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*)) 5985 // CHECK25-NEXT: ret void 5986 // 5987 // 5988 // CHECK25-LABEL: define {{[^@]+}}@.omp_outlined..4 5989 // CHECK25-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] { 5990 // CHECK25-NEXT: entry: 5991 // CHECK25-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 5992 // CHECK25-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 5993 // CHECK25-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 5994 // CHECK25-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 5995 // CHECK25-NEXT: ret void 5996 // 5997 // 5998 // CHECK25-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l93 5999 // CHECK25-SAME: (i64 [[A:%.*]], i64 [[B:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] { 6000 // CHECK25-NEXT: entry: 6001 // CHECK25-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 6002 // CHECK25-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8 6003 // CHECK25-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 6004 // CHECK25-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 6005 // CHECK25-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8 6006 // CHECK25-NEXT: [[TMP0:%.*]] = call i32 
@__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 6007 // CHECK25-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 6008 // CHECK25-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8 6009 // CHECK25-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 6010 // CHECK25-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 6011 // CHECK25-NEXT: [[CONV1:%.*]] = bitcast i64* [[B_ADDR]] to i16* 6012 // CHECK25-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i16* 6013 // CHECK25-NEXT: [[TMP1:%.*]] = load i16, i16* [[CONV2]], align 8 6014 // CHECK25-NEXT: [[TMP2:%.*]] = sext i16 [[TMP1]] to i32 6015 // CHECK25-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 1024) 6016 // CHECK25-NEXT: [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8 6017 // CHECK25-NEXT: [[CONV3:%.*]] = bitcast i64* [[A_CASTED]] to i32* 6018 // CHECK25-NEXT: store i32 [[TMP3]], i32* [[CONV3]], align 4 6019 // CHECK25-NEXT: [[TMP4:%.*]] = load i64, i64* [[A_CASTED]], align 8 6020 // CHECK25-NEXT: [[TMP5:%.*]] = load i16, i16* [[CONV1]], align 8 6021 // CHECK25-NEXT: [[CONV4:%.*]] = bitcast i64* [[B_CASTED]] to i16* 6022 // CHECK25-NEXT: store i16 [[TMP5]], i16* [[CONV4]], align 2 6023 // CHECK25-NEXT: [[TMP6:%.*]] = load i64, i64* [[B_CASTED]], align 8 6024 // CHECK25-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i64 [[TMP4]], i64 [[TMP6]]) 6025 // CHECK25-NEXT: ret void 6026 // 6027 // 6028 // CHECK25-LABEL: define {{[^@]+}}@.omp_outlined..5 6029 // CHECK25-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR0]] { 6030 // CHECK25-NEXT: entry: 6031 // CHECK25-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 6032 // CHECK25-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 6033 // CHECK25-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 6034 // CHECK25-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8 6035 // CHECK25-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 6036 // CHECK25-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 6037 // CHECK25-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 6038 // CHECK25-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8 6039 // CHECK25-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 6040 // CHECK25-NEXT: [[CONV1:%.*]] = bitcast i64* [[B_ADDR]] to i16* 6041 // CHECK25-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV1]], align 8 6042 // CHECK25-NEXT: [[CONV2:%.*]] = sext i16 [[TMP0]] to i32 6043 // CHECK25-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8 6044 // CHECK25-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CONV2]] 6045 // CHECK25-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8 6046 // CHECK25-NEXT: ret void 6047 // 6048 // 6049 // CHECK26-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l104 6050 // CHECK26-SAME: (i64 [[DOTCAPTURE_EXPR_:%.*]], i64 [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR0:[0-9]+]] { 6051 // CHECK26-NEXT: entry: 6052 // CHECK26-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 6053 // CHECK26-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca i64, align 8 6054 // CHECK26-NEXT: [[TMP0:%.*]] = call i32 
@__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]]) 6055 // CHECK26-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 6056 // CHECK26-NEXT: store i64 [[DOTCAPTURE_EXPR_1]], i64* [[DOTCAPTURE_EXPR__ADDR2]], align 8 6057 // CHECK26-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32* 6058 // CHECK26-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR2]] to i32* 6059 // CHECK26-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8 6060 // CHECK26-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV3]], align 8 6061 // CHECK26-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]]) 6062 // CHECK26-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*)) 6063 // CHECK26-NEXT: ret void 6064 // 6065 // 6066 // CHECK26-LABEL: define {{[^@]+}}@.omp_outlined. 
6067 // CHECK26-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] { 6068 // CHECK26-NEXT: entry: 6069 // CHECK26-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 6070 // CHECK26-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 6071 // CHECK26-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 6072 // CHECK26-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 6073 // CHECK26-NEXT: ret void 6074 // 6075 // 6076 // CHECK26-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l108 6077 // CHECK26-SAME: (i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] { 6078 // CHECK26-NEXT: entry: 6079 // CHECK26-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 6080 // CHECK26-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 6081 // CHECK26-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 6082 // CHECK26-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32* 6083 // CHECK26-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8 6084 // CHECK26-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 [[TMP1]]) 6085 // CHECK26-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*)) 6086 // CHECK26-NEXT: ret void 6087 // 6088 // 6089 // CHECK26-LABEL: define {{[^@]+}}@.omp_outlined..1 6090 // CHECK26-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] { 6091 // CHECK26-NEXT: entry: 6092 // CHECK26-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 6093 // CHECK26-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 6094 // CHECK26-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 6095 // CHECK26-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 6096 // CHECK26-NEXT: ret void 6097 // 6098 // 6099 // CHECK26-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l121 6100 // CHECK26-SAME: (%struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] { 6101 // CHECK26-NEXT: entry: 6102 // CHECK26-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 6103 // CHECK26-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8 6104 // CHECK26-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 6105 // CHECK26-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8 6106 // CHECK26-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 6107 // CHECK26-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 6108 // CHECK26-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8 6109 // CHECK26-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 6110 // CHECK26-NEXT: [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 6111 // CHECK26-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32* 6112 // CHECK26-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32* 6113 // CHECK26-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV1]], align 8 6114 // CHECK26-NEXT: call void 
@__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 [[TMP2]]) 6115 // CHECK26-NEXT: [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8 6116 // CHECK26-NEXT: [[CONV2:%.*]] = bitcast i64* [[B_CASTED]] to i32* 6117 // CHECK26-NEXT: store i32 [[TMP3]], i32* [[CONV2]], align 4 6118 // CHECK26-NEXT: [[TMP4:%.*]] = load i64, i64* [[B_CASTED]], align 8 6119 // CHECK26-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64)* @.omp_outlined..2 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]], i64 [[TMP4]]) 6120 // CHECK26-NEXT: ret void 6121 // 6122 // 6123 // CHECK26-LABEL: define {{[^@]+}}@.omp_outlined..2 6124 // CHECK26-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i64 [[B:%.*]]) #[[ATTR0]] { 6125 // CHECK26-NEXT: entry: 6126 // CHECK26-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 6127 // CHECK26-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 6128 // CHECK26-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 6129 // CHECK26-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8 6130 // CHECK26-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 6131 // CHECK26-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 6132 // CHECK26-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 6133 // CHECK26-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8 6134 // CHECK26-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 6135 // CHECK26-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32* 6136 // CHECK26-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8 6137 // CHECK26-NEXT: [[CONV1:%.*]] = sitofp i32 [[TMP1]] to double 6138 // CHECK26-NEXT: [[ADD:%.*]] = fadd double [[CONV1]], 1.500000e+00 6139 // CHECK26-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], 
%struct.S1* [[TMP0]], i32 0, i32 0 6140 // CHECK26-NEXT: store double [[ADD]], double* [[A]], align 8 6141 // CHECK26-NEXT: ret void 6142 // 6143 // 6144 // CHECK26-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l126 6145 // CHECK26-SAME: (%struct.S1* [[THIS:%.*]]) #[[ATTR0]] { 6146 // CHECK26-NEXT: entry: 6147 // CHECK26-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 6148 // CHECK26-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 6149 // CHECK26-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 6150 // CHECK26-NEXT: [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 6151 // CHECK26-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 1024) 6152 // CHECK26-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]]) 6153 // CHECK26-NEXT: ret void 6154 // 6155 // 6156 // CHECK26-LABEL: define {{[^@]+}}@.omp_outlined..3 6157 // CHECK26-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]]) #[[ATTR0]] { 6158 // CHECK26-NEXT: entry: 6159 // CHECK26-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 6160 // CHECK26-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 6161 // CHECK26-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 6162 // CHECK26-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 6163 // CHECK26-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 6164 // CHECK26-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 6165 // CHECK26-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 6166 // CHECK26-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], 
%struct.S1* [[TMP0]], i32 0, i32 0 6167 // CHECK26-NEXT: store double 2.500000e+00, double* [[A]], align 8 6168 // CHECK26-NEXT: ret void 6169 // 6170 // 6171 // CHECK26-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l88 6172 // CHECK26-SAME: () #[[ATTR0]] { 6173 // CHECK26-NEXT: entry: 6174 // CHECK26-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 6175 // CHECK26-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 20) 6176 // CHECK26-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*)) 6177 // CHECK26-NEXT: ret void 6178 // 6179 // 6180 // CHECK26-LABEL: define {{[^@]+}}@.omp_outlined..4 6181 // CHECK26-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] { 6182 // CHECK26-NEXT: entry: 6183 // CHECK26-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 6184 // CHECK26-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 6185 // CHECK26-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 6186 // CHECK26-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 6187 // CHECK26-NEXT: ret void 6188 // 6189 // 6190 // CHECK26-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l93 6191 // CHECK26-SAME: (i64 [[A:%.*]], i64 [[B:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] { 6192 // CHECK26-NEXT: entry: 6193 // CHECK26-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 6194 // CHECK26-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8 6195 // CHECK26-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 6196 // CHECK26-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 6197 // CHECK26-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8 6198 // CHECK26-NEXT: [[TMP0:%.*]] = call i32 
@__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 6199 // CHECK26-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 6200 // CHECK26-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8 6201 // CHECK26-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 6202 // CHECK26-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 6203 // CHECK26-NEXT: [[CONV1:%.*]] = bitcast i64* [[B_ADDR]] to i16* 6204 // CHECK26-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i16* 6205 // CHECK26-NEXT: [[TMP1:%.*]] = load i16, i16* [[CONV2]], align 8 6206 // CHECK26-NEXT: [[TMP2:%.*]] = sext i16 [[TMP1]] to i32 6207 // CHECK26-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 1024) 6208 // CHECK26-NEXT: [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8 6209 // CHECK26-NEXT: [[CONV3:%.*]] = bitcast i64* [[A_CASTED]] to i32* 6210 // CHECK26-NEXT: store i32 [[TMP3]], i32* [[CONV3]], align 4 6211 // CHECK26-NEXT: [[TMP4:%.*]] = load i64, i64* [[A_CASTED]], align 8 6212 // CHECK26-NEXT: [[TMP5:%.*]] = load i16, i16* [[CONV1]], align 8 6213 // CHECK26-NEXT: [[CONV4:%.*]] = bitcast i64* [[B_CASTED]] to i16* 6214 // CHECK26-NEXT: store i16 [[TMP5]], i16* [[CONV4]], align 2 6215 // CHECK26-NEXT: [[TMP6:%.*]] = load i64, i64* [[B_CASTED]], align 8 6216 // CHECK26-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i64 [[TMP4]], i64 [[TMP6]]) 6217 // CHECK26-NEXT: ret void 6218 // 6219 // 6220 // CHECK26-LABEL: define {{[^@]+}}@.omp_outlined..5 6221 // CHECK26-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR0]] { 6222 // CHECK26-NEXT: entry: 6223 // CHECK26-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 6224 // CHECK26-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 6225 // CHECK26-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 6226 // CHECK26-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8 6227 // CHECK26-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 6228 // CHECK26-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 6229 // CHECK26-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 6230 // CHECK26-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8 6231 // CHECK26-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 6232 // CHECK26-NEXT: [[CONV1:%.*]] = bitcast i64* [[B_ADDR]] to i16* 6233 // CHECK26-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV1]], align 8 6234 // CHECK26-NEXT: [[CONV2:%.*]] = sext i16 [[TMP0]] to i32 6235 // CHECK26-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8 6236 // CHECK26-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CONV2]] 6237 // CHECK26-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8 6238 // CHECK26-NEXT: ret void 6239 // 6240 // 6241 // CHECK27-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l104 6242 // CHECK27-SAME: (i32 [[DOTCAPTURE_EXPR_:%.*]], i32 [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR0:[0-9]+]] { 6243 // CHECK27-NEXT: entry: 6244 // CHECK27-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 6245 // CHECK27-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca i32, align 4 6246 // CHECK27-NEXT: [[TMP0:%.*]] = call i32 
@__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]]) 6247 // CHECK27-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 6248 // CHECK27-NEXT: store i32 [[DOTCAPTURE_EXPR_1]], i32* [[DOTCAPTURE_EXPR__ADDR2]], align 4 6249 // CHECK27-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 6250 // CHECK27-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR2]], align 4 6251 // CHECK27-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]]) 6252 // CHECK27-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*)) 6253 // CHECK27-NEXT: ret void 6254 // 6255 // 6256 // CHECK27-LABEL: define {{[^@]+}}@.omp_outlined. 6257 // CHECK27-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] { 6258 // CHECK27-NEXT: entry: 6259 // CHECK27-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 6260 // CHECK27-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 6261 // CHECK27-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 6262 // CHECK27-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 6263 // CHECK27-NEXT: ret void 6264 // 6265 // 6266 // CHECK27-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l108 6267 // CHECK27-SAME: (i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] { 6268 // CHECK27-NEXT: entry: 6269 // CHECK27-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 6270 // CHECK27-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 6271 // CHECK27-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 6272 // CHECK27-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 6273 // CHECK27-NEXT: call void 
@__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 [[TMP1]]) 6274 // CHECK27-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*)) 6275 // CHECK27-NEXT: ret void 6276 // 6277 // 6278 // CHECK27-LABEL: define {{[^@]+}}@.omp_outlined..1 6279 // CHECK27-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] { 6280 // CHECK27-NEXT: entry: 6281 // CHECK27-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 6282 // CHECK27-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 6283 // CHECK27-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 6284 // CHECK27-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 6285 // CHECK27-NEXT: ret void 6286 // 6287 // 6288 // CHECK27-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l121 6289 // CHECK27-SAME: (%struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] { 6290 // CHECK27-NEXT: entry: 6291 // CHECK27-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 6292 // CHECK27-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 6293 // CHECK27-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 6294 // CHECK27-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4 6295 // CHECK27-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 6296 // CHECK27-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 6297 // CHECK27-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 6298 // CHECK27-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 6299 // CHECK27-NEXT: [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 6300 // CHECK27-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 6301 // CHECK27-NEXT: call void 
@__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 [[TMP2]]) 6302 // CHECK27-NEXT: [[TMP3:%.*]] = load i32, i32* [[B_ADDR]], align 4 6303 // CHECK27-NEXT: store i32 [[TMP3]], i32* [[B_CASTED]], align 4 6304 // CHECK27-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_CASTED]], align 4 6305 // CHECK27-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32)* @.omp_outlined..2 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]], i32 [[TMP4]]) 6306 // CHECK27-NEXT: ret void 6307 // 6308 // 6309 // CHECK27-LABEL: define {{[^@]+}}@.omp_outlined..2 6310 // CHECK27-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i32 [[B:%.*]]) #[[ATTR0]] { 6311 // CHECK27-NEXT: entry: 6312 // CHECK27-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 6313 // CHECK27-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 6314 // CHECK27-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 6315 // CHECK27-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 6316 // CHECK27-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 6317 // CHECK27-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 6318 // CHECK27-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 6319 // CHECK27-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 6320 // CHECK27-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 6321 // CHECK27-NEXT: [[TMP1:%.*]] = load i32, i32* [[B_ADDR]], align 4 6322 // CHECK27-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP1]] to double 6323 // CHECK27-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 6324 // CHECK27-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0 6325 // CHECK27-NEXT: store double [[ADD]], double* [[A]], align 4 6326 // CHECK27-NEXT: ret void 6327 
// 6328 // 6329 // CHECK27-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l126 6330 // CHECK27-SAME: (%struct.S1* [[THIS:%.*]]) #[[ATTR0]] { 6331 // CHECK27-NEXT: entry: 6332 // CHECK27-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 6333 // CHECK27-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 6334 // CHECK27-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 6335 // CHECK27-NEXT: [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 6336 // CHECK27-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 1024) 6337 // CHECK27-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]]) 6338 // CHECK27-NEXT: ret void 6339 // 6340 // 6341 // CHECK27-LABEL: define {{[^@]+}}@.omp_outlined..3 6342 // CHECK27-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]]) #[[ATTR0]] { 6343 // CHECK27-NEXT: entry: 6344 // CHECK27-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 6345 // CHECK27-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 6346 // CHECK27-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 6347 // CHECK27-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 6348 // CHECK27-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 6349 // CHECK27-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 6350 // CHECK27-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 6351 // CHECK27-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0 6352 // CHECK27-NEXT: store double 2.500000e+00, double* [[A]], align 4 6353 // CHECK27-NEXT: ret void 6354 
// 6355 // 6356 // CHECK27-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l88 6357 // CHECK27-SAME: () #[[ATTR0]] { 6358 // CHECK27-NEXT: entry: 6359 // CHECK27-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 6360 // CHECK27-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 20) 6361 // CHECK27-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*)) 6362 // CHECK27-NEXT: ret void 6363 // 6364 // 6365 // CHECK27-LABEL: define {{[^@]+}}@.omp_outlined..4 6366 // CHECK27-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] { 6367 // CHECK27-NEXT: entry: 6368 // CHECK27-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 6369 // CHECK27-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 6370 // CHECK27-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 6371 // CHECK27-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 6372 // CHECK27-NEXT: ret void 6373 // 6374 // 6375 // CHECK27-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l93 6376 // CHECK27-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] { 6377 // CHECK27-NEXT: entry: 6378 // CHECK27-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 6379 // CHECK27-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 6380 // CHECK27-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 6381 // CHECK27-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 6382 // CHECK27-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4 6383 // CHECK27-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 6384 // CHECK27-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 6385 // CHECK27-NEXT: store i32 [[B]], 
i32* [[B_ADDR]], align 4 6386 // CHECK27-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 6387 // CHECK27-NEXT: [[CONV:%.*]] = bitcast i32* [[B_ADDR]] to i16* 6388 // CHECK27-NEXT: [[CONV1:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__ADDR]] to i16* 6389 // CHECK27-NEXT: [[TMP1:%.*]] = load i16, i16* [[CONV1]], align 4 6390 // CHECK27-NEXT: [[TMP2:%.*]] = sext i16 [[TMP1]] to i32 6391 // CHECK27-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 1024) 6392 // CHECK27-NEXT: [[TMP3:%.*]] = load i32, i32* [[A_ADDR]], align 4 6393 // CHECK27-NEXT: store i32 [[TMP3]], i32* [[A_CASTED]], align 4 6394 // CHECK27-NEXT: [[TMP4:%.*]] = load i32, i32* [[A_CASTED]], align 4 6395 // CHECK27-NEXT: [[TMP5:%.*]] = load i16, i16* [[CONV]], align 4 6396 // CHECK27-NEXT: [[CONV2:%.*]] = bitcast i32* [[B_CASTED]] to i16* 6397 // CHECK27-NEXT: store i16 [[TMP5]], i16* [[CONV2]], align 2 6398 // CHECK27-NEXT: [[TMP6:%.*]] = load i32, i32* [[B_CASTED]], align 4 6399 // CHECK27-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32 [[TMP4]], i32 [[TMP6]]) 6400 // CHECK27-NEXT: ret void 6401 // 6402 // 6403 // CHECK27-LABEL: define {{[^@]+}}@.omp_outlined..5 6404 // CHECK27-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] { 6405 // CHECK27-NEXT: entry: 6406 // CHECK27-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 6407 // CHECK27-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 6408 // CHECK27-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 6409 // CHECK27-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 6410 // CHECK27-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 6411 // CHECK27-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 6412 // CHECK27-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 6413 // CHECK27-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 6414 // CHECK27-NEXT: [[CONV:%.*]] = bitcast i32* [[B_ADDR]] to i16* 6415 // CHECK27-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 4 6416 // CHECK27-NEXT: [[CONV1:%.*]] = sext i16 [[TMP0]] to i32 6417 // CHECK27-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4 6418 // CHECK27-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CONV1]] 6419 // CHECK27-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4 6420 // CHECK27-NEXT: ret void 6421 // 6422 // 6423 // CHECK28-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l104 6424 // CHECK28-SAME: (i32 [[DOTCAPTURE_EXPR_:%.*]], i32 [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR0:[0-9]+]] { 6425 // CHECK28-NEXT: entry: 6426 // CHECK28-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 6427 // CHECK28-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca i32, align 4 6428 // CHECK28-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]]) 6429 // 
CHECK28-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 6430 // CHECK28-NEXT: store i32 [[DOTCAPTURE_EXPR_1]], i32* [[DOTCAPTURE_EXPR__ADDR2]], align 4 6431 // CHECK28-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 6432 // CHECK28-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR2]], align 4 6433 // CHECK28-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]]) 6434 // CHECK28-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*)) 6435 // CHECK28-NEXT: ret void 6436 // 6437 // 6438 // CHECK28-LABEL: define {{[^@]+}}@.omp_outlined. 6439 // CHECK28-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] { 6440 // CHECK28-NEXT: entry: 6441 // CHECK28-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 6442 // CHECK28-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 6443 // CHECK28-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 6444 // CHECK28-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 6445 // CHECK28-NEXT: ret void 6446 // 6447 // 6448 // CHECK28-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l108 6449 // CHECK28-SAME: (i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] { 6450 // CHECK28-NEXT: entry: 6451 // CHECK28-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 6452 // CHECK28-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 6453 // CHECK28-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 6454 // CHECK28-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 6455 // CHECK28-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 
[[TMP1]]) 6456 // CHECK28-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*)) 6457 // CHECK28-NEXT: ret void 6458 // 6459 // 6460 // CHECK28-LABEL: define {{[^@]+}}@.omp_outlined..1 6461 // CHECK28-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] { 6462 // CHECK28-NEXT: entry: 6463 // CHECK28-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 6464 // CHECK28-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 6465 // CHECK28-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 6466 // CHECK28-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 6467 // CHECK28-NEXT: ret void 6468 // 6469 // 6470 // CHECK28-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l121 6471 // CHECK28-SAME: (%struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] { 6472 // CHECK28-NEXT: entry: 6473 // CHECK28-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 6474 // CHECK28-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 6475 // CHECK28-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 6476 // CHECK28-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4 6477 // CHECK28-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 6478 // CHECK28-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 6479 // CHECK28-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 6480 // CHECK28-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 6481 // CHECK28-NEXT: [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 6482 // CHECK28-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 6483 // CHECK28-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 
[[TMP2]]) 6484 // CHECK28-NEXT: [[TMP3:%.*]] = load i32, i32* [[B_ADDR]], align 4 6485 // CHECK28-NEXT: store i32 [[TMP3]], i32* [[B_CASTED]], align 4 6486 // CHECK28-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_CASTED]], align 4 6487 // CHECK28-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32)* @.omp_outlined..2 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]], i32 [[TMP4]]) 6488 // CHECK28-NEXT: ret void 6489 // 6490 // 6491 // CHECK28-LABEL: define {{[^@]+}}@.omp_outlined..2 6492 // CHECK28-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i32 [[B:%.*]]) #[[ATTR0]] { 6493 // CHECK28-NEXT: entry: 6494 // CHECK28-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 6495 // CHECK28-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 6496 // CHECK28-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 6497 // CHECK28-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 6498 // CHECK28-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 6499 // CHECK28-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 6500 // CHECK28-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 6501 // CHECK28-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 6502 // CHECK28-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 6503 // CHECK28-NEXT: [[TMP1:%.*]] = load i32, i32* [[B_ADDR]], align 4 6504 // CHECK28-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP1]] to double 6505 // CHECK28-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 6506 // CHECK28-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0 6507 // CHECK28-NEXT: store double [[ADD]], double* [[A]], align 4 6508 // CHECK28-NEXT: ret void 6509 // 6510 // 6511 // CHECK28-LABEL: define 
{{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l126 6512 // CHECK28-SAME: (%struct.S1* [[THIS:%.*]]) #[[ATTR0]] { 6513 // CHECK28-NEXT: entry: 6514 // CHECK28-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 6515 // CHECK28-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 6516 // CHECK28-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 6517 // CHECK28-NEXT: [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 6518 // CHECK28-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 1024) 6519 // CHECK28-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]]) 6520 // CHECK28-NEXT: ret void 6521 // 6522 // 6523 // CHECK28-LABEL: define {{[^@]+}}@.omp_outlined..3 6524 // CHECK28-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]]) #[[ATTR0]] { 6525 // CHECK28-NEXT: entry: 6526 // CHECK28-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 6527 // CHECK28-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 6528 // CHECK28-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 6529 // CHECK28-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 6530 // CHECK28-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 6531 // CHECK28-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 6532 // CHECK28-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 6533 // CHECK28-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0 6534 // CHECK28-NEXT: store double 2.500000e+00, double* [[A]], align 4 6535 // CHECK28-NEXT: ret void 6536 // 6537 // 6538 // CHECK28-LABEL: define 
{{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l88 6539 // CHECK28-SAME: () #[[ATTR0]] { 6540 // CHECK28-NEXT: entry: 6541 // CHECK28-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 6542 // CHECK28-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 0, i32 20) 6543 // CHECK28-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*)) 6544 // CHECK28-NEXT: ret void 6545 // 6546 // 6547 // CHECK28-LABEL: define {{[^@]+}}@.omp_outlined..4 6548 // CHECK28-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] { 6549 // CHECK28-NEXT: entry: 6550 // CHECK28-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 6551 // CHECK28-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 6552 // CHECK28-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 6553 // CHECK28-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 6554 // CHECK28-NEXT: ret void 6555 // 6556 // 6557 // CHECK28-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l93 6558 // CHECK28-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] { 6559 // CHECK28-NEXT: entry: 6560 // CHECK28-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 6561 // CHECK28-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 6562 // CHECK28-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 6563 // CHECK28-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 6564 // CHECK28-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4 6565 // CHECK28-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 6566 // CHECK28-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 6567 // CHECK28-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 6568 // 
CHECK28-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 6569 // CHECK28-NEXT: [[CONV:%.*]] = bitcast i32* [[B_ADDR]] to i16* 6570 // CHECK28-NEXT: [[CONV1:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__ADDR]] to i16* 6571 // CHECK28-NEXT: [[TMP1:%.*]] = load i16, i16* [[CONV1]], align 4 6572 // CHECK28-NEXT: [[TMP2:%.*]] = sext i16 [[TMP1]] to i32 6573 // CHECK28-NEXT: call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 1024) 6574 // CHECK28-NEXT: [[TMP3:%.*]] = load i32, i32* [[A_ADDR]], align 4 6575 // CHECK28-NEXT: store i32 [[TMP3]], i32* [[A_CASTED]], align 4 6576 // CHECK28-NEXT: [[TMP4:%.*]] = load i32, i32* [[A_CASTED]], align 4 6577 // CHECK28-NEXT: [[TMP5:%.*]] = load i16, i16* [[CONV]], align 4 6578 // CHECK28-NEXT: [[CONV2:%.*]] = bitcast i32* [[B_CASTED]] to i16* 6579 // CHECK28-NEXT: store i16 [[TMP5]], i16* [[CONV2]], align 2 6580 // CHECK28-NEXT: [[TMP6:%.*]] = load i32, i32* [[B_CASTED]], align 4 6581 // CHECK28-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32 [[TMP4]], i32 [[TMP6]]) 6582 // CHECK28-NEXT: ret void 6583 // 6584 // 6585 // CHECK28-LABEL: define {{[^@]+}}@.omp_outlined..5 6586 // CHECK28-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] { 6587 // CHECK28-NEXT: entry: 6588 // CHECK28-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 6589 // CHECK28-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 6590 // CHECK28-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 6591 // CHECK28-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 6592 // CHECK28-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 6593 // CHECK28-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 6594 // CHECK28-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 6595 // CHECK28-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 6596 // CHECK28-NEXT: [[CONV:%.*]] = bitcast i32* [[B_ADDR]] to i16* 6597 // CHECK28-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 4 6598 // CHECK28-NEXT: [[CONV1:%.*]] = sext i16 [[TMP0]] to i32 6599 // CHECK28-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4 6600 // CHECK28-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CONV1]] 6601 // CHECK28-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4 6602 // CHECK28-NEXT: ret void 6603 // 6604 // 6605 // CHECK29-LABEL: define {{[^@]+}}@_Z3bari 6606 // CHECK29-SAME: (i32 signext [[N:%.*]]) #[[ATTR0:[0-9]+]] { 6607 // CHECK29-NEXT: entry: 6608 // CHECK29-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 6609 // CHECK29-NEXT: [[A:%.*]] = alloca i32, align 4 6610 // CHECK29-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8 6611 // CHECK29-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 6612 // CHECK29-NEXT: store i32 0, i32* [[A]], align 4 6613 // CHECK29-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], 
align 4 6614 // CHECK29-NEXT: [[CALL:%.*]] = call signext i32 @_ZN2S12r1Ei(%struct.S1* nonnull dereferenceable(8) [[S]], i32 signext [[TMP0]]) 6615 // CHECK29-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4 6616 // CHECK29-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] 6617 // CHECK29-NEXT: store i32 [[ADD]], i32* [[A]], align 4 6618 // CHECK29-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 6619 // CHECK29-NEXT: [[CALL1:%.*]] = call signext i32 @_ZL7fstatici(i32 signext [[TMP2]]) 6620 // CHECK29-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 6621 // CHECK29-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] 6622 // CHECK29-NEXT: store i32 [[ADD2]], i32* [[A]], align 4 6623 // CHECK29-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 6624 // CHECK29-NEXT: [[CALL3:%.*]] = call signext i32 @_Z9ftemplateIiET_i(i32 signext [[TMP4]]) 6625 // CHECK29-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4 6626 // CHECK29-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] 6627 // CHECK29-NEXT: store i32 [[ADD4]], i32* [[A]], align 4 6628 // CHECK29-NEXT: [[TMP6:%.*]] = load i32, i32* [[A]], align 4 6629 // CHECK29-NEXT: ret i32 [[TMP6]] 6630 // 6631 // 6632 // CHECK29-LABEL: define {{[^@]+}}@_ZN2S12r1Ei 6633 // CHECK29-SAME: (%struct.S1* nonnull dereferenceable(8) [[THIS:%.*]], i32 signext [[N:%.*]]) #[[ATTR0]] comdat align 2 { 6634 // CHECK29-NEXT: entry: 6635 // CHECK29-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 6636 // CHECK29-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 6637 // CHECK29-NEXT: [[B:%.*]] = alloca i32, align 4 6638 // CHECK29-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 6639 // CHECK29-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 6640 // CHECK29-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 6641 // CHECK29-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 6642 // CHECK29-NEXT: store i32 1, i32* [[B]], align 4 6643 // CHECK29-NEXT: [[TMP0:%.*]] = load i32, i32* 
[[N_ADDR]], align 4 6644 // CHECK29-NEXT: [[TMP1:%.*]] = load i32, i32* [[B]], align 4 6645 // CHECK29-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP0]], [[TMP1]] 6646 // CHECK29-NEXT: store i32 [[SUB]], i32* [[DOTCAPTURE_EXPR_]], align 4 6647 // CHECK29-NEXT: [[TMP2:%.*]] = load i32, i32* [[B]], align 4 6648 // CHECK29-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP2]] to double 6649 // CHECK29-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 6650 // CHECK29-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0 6651 // CHECK29-NEXT: store double [[ADD]], double* [[A]], align 8 6652 // CHECK29-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 6653 // CHECK29-NEXT: store double 2.500000e+00, double* [[A2]], align 8 6654 // CHECK29-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 6655 // CHECK29-NEXT: [[TMP3:%.*]] = load double, double* [[A3]], align 8 6656 // CHECK29-NEXT: [[CONV4:%.*]] = fptosi double [[TMP3]] to i32 6657 // CHECK29-NEXT: ret i32 [[CONV4]] 6658 // 6659 // 6660 // CHECK29-LABEL: define {{[^@]+}}@_ZL7fstatici 6661 // CHECK29-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] { 6662 // CHECK29-NEXT: entry: 6663 // CHECK29-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 6664 // CHECK29-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 6665 // CHECK29-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 6666 // CHECK29-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4 6667 // CHECK29-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 6668 // CHECK29-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 6669 // CHECK29-NEXT: store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4 6670 // CHECK29-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 6671 // CHECK29-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP1]], 32 6672 // CHECK29-NEXT: store i32 [[MUL]], i32* [[DOTCAPTURE_EXPR_1]], align 4 6673 // CHECK29-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], 
align 4 6674 // CHECK29-NEXT: [[ADD:%.*]] = add nsw i32 32, [[TMP2]] 6675 // CHECK29-NEXT: store i32 [[ADD]], i32* [[DOTCAPTURE_EXPR_2]], align 4 6676 // CHECK29-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4 6677 // CHECK29-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP3]], 1 6678 // CHECK29-NEXT: ret i32 [[ADD3]] 6679 // 6680 // 6681 // CHECK29-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i 6682 // CHECK29-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] comdat { 6683 // CHECK29-NEXT: entry: 6684 // CHECK29-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 6685 // CHECK29-NEXT: [[A:%.*]] = alloca i32, align 4 6686 // CHECK29-NEXT: [[B:%.*]] = alloca i16, align 2 6687 // CHECK29-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i16, align 2 6688 // CHECK29-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 6689 // CHECK29-NEXT: store i32 0, i32* [[A]], align 4 6690 // CHECK29-NEXT: store i16 1, i16* [[B]], align 2 6691 // CHECK29-NEXT: [[TMP0:%.*]] = load i16, i16* [[B]], align 2 6692 // CHECK29-NEXT: store i16 [[TMP0]], i16* [[DOTCAPTURE_EXPR_]], align 2 6693 // CHECK29-NEXT: [[TMP1:%.*]] = load i16, i16* [[B]], align 2 6694 // CHECK29-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32 6695 // CHECK29-NEXT: [[TMP2:%.*]] = load i32, i32* [[A]], align 4 6696 // CHECK29-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], [[CONV]] 6697 // CHECK29-NEXT: store i32 [[ADD]], i32* [[A]], align 4 6698 // CHECK29-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 6699 // CHECK29-NEXT: ret i32 [[TMP3]] 6700 // 6701 // 6702 // CHECK30-LABEL: define {{[^@]+}}@_Z3bari 6703 // CHECK30-SAME: (i32 signext [[N:%.*]]) #[[ATTR0:[0-9]+]] { 6704 // CHECK30-NEXT: entry: 6705 // CHECK30-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 6706 // CHECK30-NEXT: [[A:%.*]] = alloca i32, align 4 6707 // CHECK30-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8 6708 // CHECK30-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 6709 // CHECK30-NEXT: store i32 0, i32* [[A]], align 4 6710 // CHECK30-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], 
align 4 6711 // CHECK30-NEXT: [[CALL:%.*]] = call signext i32 @_ZN2S12r1Ei(%struct.S1* nonnull dereferenceable(8) [[S]], i32 signext [[TMP0]]) 6712 // CHECK30-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4 6713 // CHECK30-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] 6714 // CHECK30-NEXT: store i32 [[ADD]], i32* [[A]], align 4 6715 // CHECK30-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 6716 // CHECK30-NEXT: [[CALL1:%.*]] = call signext i32 @_ZL7fstatici(i32 signext [[TMP2]]) 6717 // CHECK30-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 6718 // CHECK30-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] 6719 // CHECK30-NEXT: store i32 [[ADD2]], i32* [[A]], align 4 6720 // CHECK30-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 6721 // CHECK30-NEXT: [[CALL3:%.*]] = call signext i32 @_Z9ftemplateIiET_i(i32 signext [[TMP4]]) 6722 // CHECK30-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4 6723 // CHECK30-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] 6724 // CHECK30-NEXT: store i32 [[ADD4]], i32* [[A]], align 4 6725 // CHECK30-NEXT: [[TMP6:%.*]] = load i32, i32* [[A]], align 4 6726 // CHECK30-NEXT: ret i32 [[TMP6]] 6727 // 6728 // 6729 // CHECK30-LABEL: define {{[^@]+}}@_ZN2S12r1Ei 6730 // CHECK30-SAME: (%struct.S1* nonnull dereferenceable(8) [[THIS:%.*]], i32 signext [[N:%.*]]) #[[ATTR0]] comdat align 2 { 6731 // CHECK30-NEXT: entry: 6732 // CHECK30-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 6733 // CHECK30-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 6734 // CHECK30-NEXT: [[B:%.*]] = alloca i32, align 4 6735 // CHECK30-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 6736 // CHECK30-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 6737 // CHECK30-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 6738 // CHECK30-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 6739 // CHECK30-NEXT: store i32 1, i32* [[B]], align 4 6740 // CHECK30-NEXT: [[TMP0:%.*]] = load i32, i32* 
[[N_ADDR]], align 4 6741 // CHECK30-NEXT: [[TMP1:%.*]] = load i32, i32* [[B]], align 4 6742 // CHECK30-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP0]], [[TMP1]] 6743 // CHECK30-NEXT: store i32 [[SUB]], i32* [[DOTCAPTURE_EXPR_]], align 4 6744 // CHECK30-NEXT: [[TMP2:%.*]] = load i32, i32* [[B]], align 4 6745 // CHECK30-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP2]] to double 6746 // CHECK30-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 6747 // CHECK30-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0 6748 // CHECK30-NEXT: store double [[ADD]], double* [[A]], align 8 6749 // CHECK30-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 6750 // CHECK30-NEXT: store double 2.500000e+00, double* [[A2]], align 8 6751 // CHECK30-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 6752 // CHECK30-NEXT: [[TMP3:%.*]] = load double, double* [[A3]], align 8 6753 // CHECK30-NEXT: [[CONV4:%.*]] = fptosi double [[TMP3]] to i32 6754 // CHECK30-NEXT: ret i32 [[CONV4]] 6755 // 6756 // 6757 // CHECK30-LABEL: define {{[^@]+}}@_ZL7fstatici 6758 // CHECK30-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] { 6759 // CHECK30-NEXT: entry: 6760 // CHECK30-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 6761 // CHECK30-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 6762 // CHECK30-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 6763 // CHECK30-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4 6764 // CHECK30-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 6765 // CHECK30-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 6766 // CHECK30-NEXT: store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4 6767 // CHECK30-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 6768 // CHECK30-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP1]], 32 6769 // CHECK30-NEXT: store i32 [[MUL]], i32* [[DOTCAPTURE_EXPR_1]], align 4 6770 // CHECK30-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], 
align 4 6771 // CHECK30-NEXT: [[ADD:%.*]] = add nsw i32 32, [[TMP2]] 6772 // CHECK30-NEXT: store i32 [[ADD]], i32* [[DOTCAPTURE_EXPR_2]], align 4 6773 // CHECK30-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4 6774 // CHECK30-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP3]], 1 6775 // CHECK30-NEXT: ret i32 [[ADD3]] 6776 // 6777 // 6778 // CHECK30-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i 6779 // CHECK30-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] comdat { 6780 // CHECK30-NEXT: entry: 6781 // CHECK30-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 6782 // CHECK30-NEXT: [[A:%.*]] = alloca i32, align 4 6783 // CHECK30-NEXT: [[B:%.*]] = alloca i16, align 2 6784 // CHECK30-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i16, align 2 6785 // CHECK30-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 6786 // CHECK30-NEXT: store i32 0, i32* [[A]], align 4 6787 // CHECK30-NEXT: store i16 1, i16* [[B]], align 2 6788 // CHECK30-NEXT: [[TMP0:%.*]] = load i16, i16* [[B]], align 2 6789 // CHECK30-NEXT: store i16 [[TMP0]], i16* [[DOTCAPTURE_EXPR_]], align 2 6790 // CHECK30-NEXT: [[TMP1:%.*]] = load i16, i16* [[B]], align 2 6791 // CHECK30-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32 6792 // CHECK30-NEXT: [[TMP2:%.*]] = load i32, i32* [[A]], align 4 6793 // CHECK30-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], [[CONV]] 6794 // CHECK30-NEXT: store i32 [[ADD]], i32* [[A]], align 4 6795 // CHECK30-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 6796 // CHECK30-NEXT: ret i32 [[TMP3]] 6797 // 6798 // 6799 // CHECK31-LABEL: define {{[^@]+}}@_Z3bari 6800 // CHECK31-SAME: (i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] { 6801 // CHECK31-NEXT: entry: 6802 // CHECK31-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 6803 // CHECK31-NEXT: [[A:%.*]] = alloca i32, align 4 6804 // CHECK31-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4 6805 // CHECK31-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 6806 // CHECK31-NEXT: store i32 0, i32* [[A]], align 4 6807 // CHECK31-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 6808 
// CHECK31-NEXT: [[CALL:%.*]] = call i32 @_ZN2S12r1Ei(%struct.S1* nonnull dereferenceable(8) [[S]], i32 [[TMP0]]) 6809 // CHECK31-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4 6810 // CHECK31-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] 6811 // CHECK31-NEXT: store i32 [[ADD]], i32* [[A]], align 4 6812 // CHECK31-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 6813 // CHECK31-NEXT: [[CALL1:%.*]] = call i32 @_ZL7fstatici(i32 [[TMP2]]) 6814 // CHECK31-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 6815 // CHECK31-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] 6816 // CHECK31-NEXT: store i32 [[ADD2]], i32* [[A]], align 4 6817 // CHECK31-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 6818 // CHECK31-NEXT: [[CALL3:%.*]] = call i32 @_Z9ftemplateIiET_i(i32 [[TMP4]]) 6819 // CHECK31-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4 6820 // CHECK31-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] 6821 // CHECK31-NEXT: store i32 [[ADD4]], i32* [[A]], align 4 6822 // CHECK31-NEXT: [[TMP6:%.*]] = load i32, i32* [[A]], align 4 6823 // CHECK31-NEXT: ret i32 [[TMP6]] 6824 // 6825 // 6826 // CHECK31-LABEL: define {{[^@]+}}@_ZN2S12r1Ei 6827 // CHECK31-SAME: (%struct.S1* nonnull dereferenceable(8) [[THIS:%.*]], i32 [[N:%.*]]) #[[ATTR0]] comdat align 2 { 6828 // CHECK31-NEXT: entry: 6829 // CHECK31-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 6830 // CHECK31-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 6831 // CHECK31-NEXT: [[B:%.*]] = alloca i32, align 4 6832 // CHECK31-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 6833 // CHECK31-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 6834 // CHECK31-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 6835 // CHECK31-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 6836 // CHECK31-NEXT: store i32 1, i32* [[B]], align 4 6837 // CHECK31-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 6838 // CHECK31-NEXT: [[TMP1:%.*]] = load i32, i32* 
[[B]], align 4 6839 // CHECK31-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP0]], [[TMP1]] 6840 // CHECK31-NEXT: store i32 [[SUB]], i32* [[DOTCAPTURE_EXPR_]], align 4 6841 // CHECK31-NEXT: [[TMP2:%.*]] = load i32, i32* [[B]], align 4 6842 // CHECK31-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP2]] to double 6843 // CHECK31-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 6844 // CHECK31-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0 6845 // CHECK31-NEXT: store double [[ADD]], double* [[A]], align 4 6846 // CHECK31-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 6847 // CHECK31-NEXT: store double 2.500000e+00, double* [[A2]], align 4 6848 // CHECK31-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 6849 // CHECK31-NEXT: [[TMP3:%.*]] = load double, double* [[A3]], align 4 6850 // CHECK31-NEXT: [[CONV4:%.*]] = fptosi double [[TMP3]] to i32 6851 // CHECK31-NEXT: ret i32 [[CONV4]] 6852 // 6853 // 6854 // CHECK31-LABEL: define {{[^@]+}}@_ZL7fstatici 6855 // CHECK31-SAME: (i32 [[N:%.*]]) #[[ATTR0]] { 6856 // CHECK31-NEXT: entry: 6857 // CHECK31-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 6858 // CHECK31-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 6859 // CHECK31-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 6860 // CHECK31-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4 6861 // CHECK31-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 6862 // CHECK31-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 6863 // CHECK31-NEXT: store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4 6864 // CHECK31-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 6865 // CHECK31-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP1]], 32 6866 // CHECK31-NEXT: store i32 [[MUL]], i32* [[DOTCAPTURE_EXPR_1]], align 4 6867 // CHECK31-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 6868 // CHECK31-NEXT: [[ADD:%.*]] = add nsw i32 32, [[TMP2]] 6869 // 
CHECK31-NEXT: store i32 [[ADD]], i32* [[DOTCAPTURE_EXPR_2]], align 4 6870 // CHECK31-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4 6871 // CHECK31-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP3]], 1 6872 // CHECK31-NEXT: ret i32 [[ADD3]] 6873 // 6874 // 6875 // CHECK31-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i 6876 // CHECK31-SAME: (i32 [[N:%.*]]) #[[ATTR0]] comdat { 6877 // CHECK31-NEXT: entry: 6878 // CHECK31-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 6879 // CHECK31-NEXT: [[A:%.*]] = alloca i32, align 4 6880 // CHECK31-NEXT: [[B:%.*]] = alloca i16, align 2 6881 // CHECK31-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i16, align 2 6882 // CHECK31-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 6883 // CHECK31-NEXT: store i32 0, i32* [[A]], align 4 6884 // CHECK31-NEXT: store i16 1, i16* [[B]], align 2 6885 // CHECK31-NEXT: [[TMP0:%.*]] = load i16, i16* [[B]], align 2 6886 // CHECK31-NEXT: store i16 [[TMP0]], i16* [[DOTCAPTURE_EXPR_]], align 2 6887 // CHECK31-NEXT: [[TMP1:%.*]] = load i16, i16* [[B]], align 2 6888 // CHECK31-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32 6889 // CHECK31-NEXT: [[TMP2:%.*]] = load i32, i32* [[A]], align 4 6890 // CHECK31-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], [[CONV]] 6891 // CHECK31-NEXT: store i32 [[ADD]], i32* [[A]], align 4 6892 // CHECK31-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 6893 // CHECK31-NEXT: ret i32 [[TMP3]] 6894 // 6895 // 6896 // CHECK32-LABEL: define {{[^@]+}}@_Z3bari 6897 // CHECK32-SAME: (i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] { 6898 // CHECK32-NEXT: entry: 6899 // CHECK32-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 6900 // CHECK32-NEXT: [[A:%.*]] = alloca i32, align 4 6901 // CHECK32-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4 6902 // CHECK32-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 6903 // CHECK32-NEXT: store i32 0, i32* [[A]], align 4 6904 // CHECK32-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 6905 // CHECK32-NEXT: [[CALL:%.*]] = call i32 @_ZN2S12r1Ei(%struct.S1* nonnull 
dereferenceable(8) [[S]], i32 [[TMP0]]) 6906 // CHECK32-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4 6907 // CHECK32-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] 6908 // CHECK32-NEXT: store i32 [[ADD]], i32* [[A]], align 4 6909 // CHECK32-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 6910 // CHECK32-NEXT: [[CALL1:%.*]] = call i32 @_ZL7fstatici(i32 [[TMP2]]) 6911 // CHECK32-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 6912 // CHECK32-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] 6913 // CHECK32-NEXT: store i32 [[ADD2]], i32* [[A]], align 4 6914 // CHECK32-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 6915 // CHECK32-NEXT: [[CALL3:%.*]] = call i32 @_Z9ftemplateIiET_i(i32 [[TMP4]]) 6916 // CHECK32-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4 6917 // CHECK32-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] 6918 // CHECK32-NEXT: store i32 [[ADD4]], i32* [[A]], align 4 6919 // CHECK32-NEXT: [[TMP6:%.*]] = load i32, i32* [[A]], align 4 6920 // CHECK32-NEXT: ret i32 [[TMP6]] 6921 // 6922 // 6923 // CHECK32-LABEL: define {{[^@]+}}@_ZN2S12r1Ei 6924 // CHECK32-SAME: (%struct.S1* nonnull dereferenceable(8) [[THIS:%.*]], i32 [[N:%.*]]) #[[ATTR0]] comdat align 2 { 6925 // CHECK32-NEXT: entry: 6926 // CHECK32-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 6927 // CHECK32-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 6928 // CHECK32-NEXT: [[B:%.*]] = alloca i32, align 4 6929 // CHECK32-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 6930 // CHECK32-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 6931 // CHECK32-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 6932 // CHECK32-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 6933 // CHECK32-NEXT: store i32 1, i32* [[B]], align 4 6934 // CHECK32-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 6935 // CHECK32-NEXT: [[TMP1:%.*]] = load i32, i32* [[B]], align 4 6936 // CHECK32-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP0]], 
[[TMP1]] 6937 // CHECK32-NEXT: store i32 [[SUB]], i32* [[DOTCAPTURE_EXPR_]], align 4 6938 // CHECK32-NEXT: [[TMP2:%.*]] = load i32, i32* [[B]], align 4 6939 // CHECK32-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP2]] to double 6940 // CHECK32-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 6941 // CHECK32-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0 6942 // CHECK32-NEXT: store double [[ADD]], double* [[A]], align 4 6943 // CHECK32-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 6944 // CHECK32-NEXT: store double 2.500000e+00, double* [[A2]], align 4 6945 // CHECK32-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 6946 // CHECK32-NEXT: [[TMP3:%.*]] = load double, double* [[A3]], align 4 6947 // CHECK32-NEXT: [[CONV4:%.*]] = fptosi double [[TMP3]] to i32 6948 // CHECK32-NEXT: ret i32 [[CONV4]] 6949 // 6950 // 6951 // CHECK32-LABEL: define {{[^@]+}}@_ZL7fstatici 6952 // CHECK32-SAME: (i32 [[N:%.*]]) #[[ATTR0]] { 6953 // CHECK32-NEXT: entry: 6954 // CHECK32-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 6955 // CHECK32-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 6956 // CHECK32-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 6957 // CHECK32-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4 6958 // CHECK32-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 6959 // CHECK32-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 6960 // CHECK32-NEXT: store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4 6961 // CHECK32-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 6962 // CHECK32-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP1]], 32 6963 // CHECK32-NEXT: store i32 [[MUL]], i32* [[DOTCAPTURE_EXPR_1]], align 4 6964 // CHECK32-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 6965 // CHECK32-NEXT: [[ADD:%.*]] = add nsw i32 32, [[TMP2]] 6966 // CHECK32-NEXT: store i32 [[ADD]], i32* [[DOTCAPTURE_EXPR_2]], align 4 6967 // 
CHECK32-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4 6968 // CHECK32-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP3]], 1 6969 // CHECK32-NEXT: ret i32 [[ADD3]] 6970 // 6971 // 6972 // CHECK32-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i 6973 // CHECK32-SAME: (i32 [[N:%.*]]) #[[ATTR0]] comdat { 6974 // CHECK32-NEXT: entry: 6975 // CHECK32-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 6976 // CHECK32-NEXT: [[A:%.*]] = alloca i32, align 4 6977 // CHECK32-NEXT: [[B:%.*]] = alloca i16, align 2 6978 // CHECK32-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i16, align 2 6979 // CHECK32-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 6980 // CHECK32-NEXT: store i32 0, i32* [[A]], align 4 6981 // CHECK32-NEXT: store i16 1, i16* [[B]], align 2 6982 // CHECK32-NEXT: [[TMP0:%.*]] = load i16, i16* [[B]], align 2 6983 // CHECK32-NEXT: store i16 [[TMP0]], i16* [[DOTCAPTURE_EXPR_]], align 2 6984 // CHECK32-NEXT: [[TMP1:%.*]] = load i16, i16* [[B]], align 2 6985 // CHECK32-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32 6986 // CHECK32-NEXT: [[TMP2:%.*]] = load i32, i32* [[A]], align 4 6987 // CHECK32-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], [[CONV]] 6988 // CHECK32-NEXT: store i32 [[ADD]], i32* [[A]], align 4 6989 // CHECK32-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 6990 // CHECK32-NEXT: ret i32 [[TMP3]] 6991 // 6992