// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// Test host codegen.
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK1
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK2
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK3
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK4
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK5
// RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK6
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK7
// RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK8

// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK9
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK10
// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK11
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK12
// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK13
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK14
// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK15
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK16

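// The RUN lines above exercise every host-side combination this test cares
// about: OpenMP 4.5 vs. 5.0 (-DOMP5), full -fopenmp vs. -fopenmp-simd (which
// must emit no offloading machinery), 64-bit and 32-bit hosts, and direct
// compilation vs. compilation through a PCH.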
// Test target codegen - host bc file has to be created first.
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK17
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK18
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK19
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK20
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK21
// RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK22
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK23
// RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK24

// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK25
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK26
// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK27
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK28
// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK29
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK30
// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK31
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK32

// expected-no-diagnostics
#ifndef HEADER
#define HEADER



// We have 8 target regions, but only 7 of them will actually generate
// offloading code; only 6 will have mapped arguments, and only 4 have
// all-constant map sizes.



// Check that target registration is emitted as a Ctor.


template<typename tx, typename ty>
struct TT{
  tx X;
  ty Y;
};

long long get_val() { return 0; }

int foo(int n) {
  int a = 0;
  short aa = 0;
  float b[10];
  float bn[n];
  double c[5][10];
  double cn[5][n];
  TT<long long, char> d;

  #pragma omp target parallel for simd nowait
  for (int i = 3; i < 32; i += 5) {
  }

  long long k = get_val();
  #pragma omp target parallel for simd if(target: 0) linear(k : 3) schedule(dynamic)
  for (int i = 10; i > 1; i--) {
    a += 1;
  }


  int lin = 12;
  #pragma omp target parallel for simd if(target: 1) linear(lin, a : get_val())
  for (unsigned long long it = 2000; it >= 600; it-=400) {
    aa += 1;
  }




  #pragma omp target parallel for simd if(target: n>10)
  for (short it = 6; it <= 20; it-=-4) {
    a += 1;
    aa += 1;
  }

  // We capture 3 VLA sizes in this target region




  // The names below are not necessarily consistent with the names used for the
  // addresses above as some are repeated.










  #pragma omp target parallel for simd if(target: n>20) schedule(static, a)
  for (unsigned char it = 'z'; it >= 'a'; it+=-1) {
    a += 1;
    b[2] += 1.0;
    bn[3] += 1.0;
    c[1][2] += 1.0;
    cn[1][3] += 1.0;
    d.X += 1;
    d.Y += 1;
  }

  return a;
}

// Check that the offloading functions are emitted and that the arguments are
// correct and loaded correctly for the target regions in foo().




// Create stack storage and store argument in there.

// Create stack storage and store argument in there.

// Create stack storage and store argument in there.

// Create local storage for each capture.



// To reduce complexity, we're only going as far as validating the signature of the outlined parallel function.
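// As a rough orientation (the authoritative shape is in the CHECK lines far
// below), an outlined parallel function is assumed to look like:
//   define internal void @.omp_outlined.(i32* noalias %.global_tid.,
//                                        i32* noalias %.bound_tid.,
//                                        <one parameter per capture>)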

template<typename tx>
tx ftemplate(int n) {
  tx a = 0;
  short aa = 0;
  tx b[10];

  #pragma omp target parallel for simd if(target: n>40)
  for (long long i = -10; i < 10; i += 3) {
    a += 1;
    aa += 1;
    b[2] += 1;
  }

  return a;
}

static
int fstatic(int n) {
  int a = 0;
  short aa = 0;
  char aaa = 0;
  int b[10];

  #pragma omp target parallel for simd if(target: n>50)
  for (unsigned i=100; i<10; i+=10) {
    a += 1;
    aa += 1;
    aaa += 1;
    b[2] += 1;
  }

  return a;
}

struct S1 {
  double a;

  int r1(int n){
    int b = n+1;
    short int c[2][n];

#ifdef OMP5
    #pragma omp target parallel for simd if(n>60) nontemporal(a)
#else
    #pragma omp target parallel for simd if(target: n>60)
#endif // OMP5
    for (unsigned long long it = 2000; it >= 600; it -= 400) {
      this->a = (double)b + 1.5;
      c[1][1] = ++a;
    }

    return c[1][1] + (int)b;
  }
};

int bar(int n){
  int a = 0;

  a += foo(n);

  S1 S;
  a += S.r1(n);

  a += fstatic(n);

  a += ftemplate<int>(n);

  return a;
}



// We capture 2 VLA sizes in this target region


// The names below are not necessarily consistent with the names used for the
// addresses above as some are repeated.



















// Check that the offloading functions are emitted and that the arguments are
// correct and loaded correctly for the target regions of the callees of bar().

// Create local storage for each capture.
// Store captures in the context.



// To reduce complexity, we're only going as far as validating the signature of the outlined parallel function.


// Create local storage for each capture.
// Store captures in the context.




// To reduce complexity, we're only going as far as validating the signature of the outlined parallel function.

// Create local storage for each capture.
// Store captures in the context.



// To reduce complexity, we're only going as far as validating the signature of the outlined parallel function.


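// For every offloaded region the host builds parallel arrays (base pointers,
// section pointers, map sizes, and, here always null, user-defined mapper
// pointers) and passes them to __tgt_target_teams_mapper together with the
// region's .region_id; a nonzero return value selects the host fallback path.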
#endif
// CHECK1-LABEL: define {{[^@]+}}@_Z7get_valv
// CHECK1-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: ret i64 0
//
//
// CHECK1-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK1-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK1-NEXT: [[B:%.*]] = alloca [10 x float], align 4
// CHECK1-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK1-NEXT: [[__VLA_EXPR1:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8
// CHECK1-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
// CHECK1-NEXT: [[K:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[K_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[LIN:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[LIN_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[A_CASTED4:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
// CHECK1-NEXT: [[A_CASTED6:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AA_CASTED8:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS10:%.*]] = alloca [2 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS11:%.*]] = alloca [2 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS12:%.*]] = alloca [2 x i8*], align 8
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[A_CASTED15:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [10 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS21:%.*]] = alloca [10 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [10 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
// CHECK1-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK1-NEXT: store i32 0, i32* [[A]], align 4
// CHECK1-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK1-NEXT: [[TMP3:%.*]] = call i8* @llvm.stacksave()
// CHECK1-NEXT: store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP2]], align 4
// CHECK1-NEXT: store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
// CHECK1-NEXT: [[TMP6:%.*]] = mul nuw i64 5, [[TMP5]]
// CHECK1-NEXT: [[VLA1:%.*]] = alloca double, i64 [[TMP6]], align 8
// CHECK1-NEXT: store i64 [[TMP5]], i64* [[__VLA_EXPR1]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = call i8* @__kmpc_omp_target_task_alloc(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i64 40, i64 1, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*), i64 -1)
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.kmp_task_t_with_privates*
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP8]], i32 0, i32 0
// CHECK1-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i8* [[TMP7]])
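// Because the l96 region is 'nowait', it is not launched inline: the host
// allocates a proxy task with __kmpc_omp_target_task_alloc and enqueues it
// with __kmpc_omp_task; the actual device launch happens inside
// @.omp_task_entry., checked further below.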
// CHECK1-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv()
// CHECK1-NEXT: store i64 [[CALL]], i64* [[K]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[A]], align 4
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK1-NEXT: store i32 [[TMP11]], i32* [[CONV]], align 4
// CHECK1-NEXT: [[TMP12:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK1-NEXT: [[TMP13:%.*]] = load i64, i64* [[K]], align 8
// CHECK1-NEXT: store i64 [[TMP13]], i64* [[K_CASTED]], align 8
// CHECK1-NEXT: [[TMP14:%.*]] = load i64, i64* [[K_CASTED]], align 8
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101(i64 [[TMP12]], i64 [[TMP14]]) #[[ATTR4:[0-9]+]]
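// if(target: 0) disables offloading statically, so the l101 region gets no
// offload entry and no argument arrays at all; the host simply calls the host
// version of the region directly, as checked just above.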
// CHECK1-NEXT: store i32 12, i32* [[LIN]], align 4
// CHECK1-NEXT: [[TMP15:%.*]] = load i16, i16* [[AA]], align 2
// CHECK1-NEXT: [[CONV2:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK1-NEXT: store i16 [[TMP15]], i16* [[CONV2]], align 2
// CHECK1-NEXT: [[TMP16:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[LIN]], align 4
// CHECK1-NEXT: [[CONV3:%.*]] = bitcast i64* [[LIN_CASTED]] to i32*
// CHECK1-NEXT: store i32 [[TMP17]], i32* [[CONV3]], align 4
// CHECK1-NEXT: [[TMP18:%.*]] = load i64, i64* [[LIN_CASTED]], align 8
// CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[A]], align 4
// CHECK1-NEXT: [[CONV5:%.*]] = bitcast i64* [[A_CASTED4]] to i32*
// CHECK1-NEXT: store i32 [[TMP19]], i32* [[CONV5]], align 4
// CHECK1-NEXT: [[TMP20:%.*]] = load i64, i64* [[A_CASTED4]], align 8
// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i64*
// CHECK1-NEXT: store i64 [[TMP16]], i64* [[TMP22]], align 8
// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i64*
// CHECK1-NEXT: store i64 [[TMP16]], i64* [[TMP24]], align 8
// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK1-NEXT: store i8* null, i8** [[TMP25]], align 8
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK1-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64*
// CHECK1-NEXT: store i64 [[TMP18]], i64* [[TMP27]], align 8
// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK1-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64*
// CHECK1-NEXT: store i64 [[TMP18]], i64* [[TMP29]], align 8
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK1-NEXT: store i8* null, i8** [[TMP30]], align 8
// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK1-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i64*
// CHECK1-NEXT: store i64 [[TMP20]], i64* [[TMP32]], align 8
// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK1-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i64*
// CHECK1-NEXT: store i64 [[TMP20]], i64* [[TMP34]], align 8
// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK1-NEXT: store i8* null, i8** [[TMP35]], align 8
// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108.region_id, i32 3, i8** [[TMP36]], i8** [[TMP37]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK1-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
// CHECK1-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK1: omp_offload.failed:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108(i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]]) #[[ATTR4]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK1: omp_offload.cont:
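// The l108 region has three scalar captures (aa, lin, a), all passed by value
// as i64, so both the map sizes and the map types are constant globals
// (@.offload_sizes / @.offload_maptypes) rather than locally built arrays.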
// CHECK1-NEXT: [[TMP40:%.*]] = load i32, i32* [[A]], align 4
// CHECK1-NEXT: [[CONV7:%.*]] = bitcast i64* [[A_CASTED6]] to i32*
// CHECK1-NEXT: store i32 [[TMP40]], i32* [[CONV7]], align 4
// CHECK1-NEXT: [[TMP41:%.*]] = load i64, i64* [[A_CASTED6]], align 8
// CHECK1-NEXT: [[TMP42:%.*]] = load i16, i16* [[AA]], align 2
// CHECK1-NEXT: [[CONV9:%.*]] = bitcast i64* [[AA_CASTED8]] to i16*
// CHECK1-NEXT: store i16 [[TMP42]], i16* [[CONV9]], align 2
// CHECK1-NEXT: [[TMP43:%.*]] = load i64, i64* [[AA_CASTED8]], align 8
// CHECK1-NEXT: [[TMP44:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP44]], 10
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK1: omp_if.then:
// CHECK1-NEXT: [[TMP45:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
// CHECK1-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i64*
// CHECK1-NEXT: store i64 [[TMP41]], i64* [[TMP46]], align 8
// CHECK1-NEXT: [[TMP47:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
// CHECK1-NEXT: [[TMP48:%.*]] = bitcast i8** [[TMP47]] to i64*
// CHECK1-NEXT: store i64 [[TMP41]], i64* [[TMP48]], align 8
// CHECK1-NEXT: [[TMP49:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS12]], i64 0, i64 0
// CHECK1-NEXT: store i8* null, i8** [[TMP49]], align 8
// CHECK1-NEXT: [[TMP50:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 1
// CHECK1-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i64*
// CHECK1-NEXT: store i64 [[TMP43]], i64* [[TMP51]], align 8
// CHECK1-NEXT: [[TMP52:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 1
// CHECK1-NEXT: [[TMP53:%.*]] = bitcast i8** [[TMP52]] to i64*
// CHECK1-NEXT: store i64 [[TMP43]], i64* [[TMP53]], align 8
// CHECK1-NEXT: [[TMP54:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS12]], i64 0, i64 1
// CHECK1-NEXT: store i8* null, i8** [[TMP54]], align 8
// CHECK1-NEXT: [[TMP55:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
// CHECK1-NEXT: [[TMP56:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
// CHECK1-NEXT: [[TMP57:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116.region_id, i32 2, i8** [[TMP55]], i8** [[TMP56]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK1-NEXT: [[TMP58:%.*]] = icmp ne i32 [[TMP57]], 0
// CHECK1-NEXT: br i1 [[TMP58]], label [[OMP_OFFLOAD_FAILED13:%.*]], label [[OMP_OFFLOAD_CONT14:%.*]]
// CHECK1: omp_offload.failed13:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116(i64 [[TMP41]], i64 [[TMP43]]) #[[ATTR4]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT14]]
// CHECK1: omp_offload.cont14:
// CHECK1-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK1: omp_if.else:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116(i64 [[TMP41]], i64 [[TMP43]]) #[[ATTR4]]
// CHECK1-NEXT: br label [[OMP_IF_END]]
// CHECK1: omp_if.end:
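// For if(target: n>10) the guard is evaluated at run time: offloading is only
// attempted on the omp_if.then path, and the omp_if.else path (like an offload
// failure) falls back to the same host version of the l116 region.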
// CHECK1-NEXT: [[TMP59:%.*]] = load i32, i32* [[A]], align 4
// CHECK1-NEXT: store i32 [[TMP59]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-NEXT: [[TMP60:%.*]] = load i32, i32* [[A]], align 4
// CHECK1-NEXT: [[CONV16:%.*]] = bitcast i64* [[A_CASTED15]] to i32*
// CHECK1-NEXT: store i32 [[TMP60]], i32* [[CONV16]], align 4
// CHECK1-NEXT: [[TMP61:%.*]] = load i64, i64* [[A_CASTED15]], align 8
// CHECK1-NEXT: [[TMP62:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-NEXT: [[CONV17:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK1-NEXT: store i32 [[TMP62]], i32* [[CONV17]], align 4
// CHECK1-NEXT: [[TMP63:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK1-NEXT: [[TMP64:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK1-NEXT: [[CMP18:%.*]] = icmp sgt i32 [[TMP64]], 20
// CHECK1-NEXT: br i1 [[CMP18]], label [[OMP_IF_THEN19:%.*]], label [[OMP_IF_ELSE25:%.*]]
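// The chunk expression of schedule(static, a) is captured by value into
// [[DOTCAPTURE_EXPR_]] above and travels as the region's tenth argument.
// On the offload path below, the byte sizes of the two VLA captures are
// computed at run time as element count times element size.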
// CHECK1: omp_if.then19:
// CHECK1-NEXT: [[TMP65:%.*]] = mul nuw i64 [[TMP2]], 4
// CHECK1-NEXT: [[TMP66:%.*]] = mul nuw i64 5, [[TMP5]]
// CHECK1-NEXT: [[TMP67:%.*]] = mul nuw i64 [[TMP66]], 8
// CHECK1-NEXT: [[TMP68:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0
// CHECK1-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i64*
// CHECK1-NEXT: store i64 [[TMP61]], i64* [[TMP69]], align 8
// CHECK1-NEXT: [[TMP70:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0
// CHECK1-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i64*
// CHECK1-NEXT: store i64 [[TMP61]], i64* [[TMP71]], align 8
// CHECK1-NEXT: [[TMP72:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK1-NEXT: store i64 4, i64* [[TMP72]], align 8
// CHECK1-NEXT: [[TMP73:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0
// CHECK1-NEXT: store i8* null, i8** [[TMP73]], align 8
// CHECK1-NEXT: [[TMP74:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1
// CHECK1-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to [10 x float]**
// CHECK1-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP75]], align 8
// CHECK1-NEXT: [[TMP76:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1
// CHECK1-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to [10 x float]**
// CHECK1-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP77]], align 8
// CHECK1-NEXT: [[TMP78:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
// CHECK1-NEXT: store i64 40, i64* [[TMP78]], align 8
// CHECK1-NEXT: [[TMP79:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1
// CHECK1-NEXT: store i8* null, i8** [[TMP79]], align 8
// CHECK1-NEXT: [[TMP80:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2
// CHECK1-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i64*
// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP81]], align 8
// CHECK1-NEXT: [[TMP82:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2
// CHECK1-NEXT: [[TMP83:%.*]] = bitcast i8** [[TMP82]] to i64*
// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP83]], align 8
// CHECK1-NEXT: [[TMP84:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
// CHECK1-NEXT: store i64 8, i64* [[TMP84]], align 8
// CHECK1-NEXT: [[TMP85:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2
// CHECK1-NEXT: store i8* null, i8** [[TMP85]], align 8
// CHECK1-NEXT: [[TMP86:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3
// CHECK1-NEXT: [[TMP87:%.*]] = bitcast i8** [[TMP86]] to float**
// CHECK1-NEXT: store float* [[VLA]], float** [[TMP87]], align 8
// CHECK1-NEXT: [[TMP88:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3
// CHECK1-NEXT: [[TMP89:%.*]] = bitcast i8** [[TMP88]] to float**
// CHECK1-NEXT: store float* [[VLA]], float** [[TMP89]], align 8
// CHECK1-NEXT: [[TMP90:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
// CHECK1-NEXT: store i64 [[TMP65]], i64* [[TMP90]], align 8
// CHECK1-NEXT: [[TMP91:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 3
// CHECK1-NEXT: store i8* null, i8** [[TMP91]], align 8
// CHECK1-NEXT: [[TMP92:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 4
// CHECK1-NEXT: [[TMP93:%.*]] = bitcast i8** [[TMP92]] to [5 x [10 x double]]**
// CHECK1-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP93]], align 8
// CHECK1-NEXT: [[TMP94:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 4
// CHECK1-NEXT: [[TMP95:%.*]] = bitcast i8** [[TMP94]] to [5 x [10 x double]]**
// CHECK1-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP95]], align 8
// CHECK1-NEXT: [[TMP96:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
// CHECK1-NEXT: store i64 400, i64* [[TMP96]], align 8
// CHECK1-NEXT: [[TMP97:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 4
// CHECK1-NEXT: store i8* null, i8** [[TMP97]], align 8
// CHECK1-NEXT: [[TMP98:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 5
// CHECK1-NEXT: [[TMP99:%.*]] = bitcast i8** [[TMP98]] to i64*
// CHECK1-NEXT: store i64 5, i64* [[TMP99]], align 8
// CHECK1-NEXT: [[TMP100:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 5
// CHECK1-NEXT: [[TMP101:%.*]] = bitcast i8** [[TMP100]] to i64*
// CHECK1-NEXT: store i64 5, i64* [[TMP101]], align 8
// CHECK1-NEXT: [[TMP102:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5
// CHECK1-NEXT: store i64 8, i64* [[TMP102]], align 8
// CHECK1-NEXT: [[TMP103:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 5
// CHECK1-NEXT: store i8* null, i8** [[TMP103]], align 8
// CHECK1-NEXT: [[TMP104:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 6
// CHECK1-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to i64*
// CHECK1-NEXT: store i64 [[TMP5]], i64* [[TMP105]], align 8
// CHECK1-NEXT: [[TMP106:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 6
// CHECK1-NEXT: [[TMP107:%.*]] = bitcast i8** [[TMP106]] to i64*
// CHECK1-NEXT: store i64 [[TMP5]], i64* [[TMP107]], align 8
// CHECK1-NEXT: [[TMP108:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6
// CHECK1-NEXT: store i64 8, i64* [[TMP108]], align 8
// CHECK1-NEXT: [[TMP109:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 6
// CHECK1-NEXT: store i8* null, i8** [[TMP109]], align 8
// CHECK1-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 7
// CHECK1-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to double**
// CHECK1-NEXT: store double* [[VLA1]], double** [[TMP111]], align 8
// CHECK1-NEXT: [[TMP112:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 7
// CHECK1-NEXT: [[TMP113:%.*]] = bitcast i8** [[TMP112]] to double**
// CHECK1-NEXT: store double* [[VLA1]], double** [[TMP113]], align 8
// CHECK1-NEXT: [[TMP114:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7
// CHECK1-NEXT: store i64 [[TMP67]], i64* [[TMP114]], align 8
// CHECK1-NEXT: [[TMP115:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 7
// CHECK1-NEXT: store i8* null, i8** [[TMP115]], align 8
// CHECK1-NEXT: [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 8
// CHECK1-NEXT: [[TMP117:%.*]] = bitcast i8** [[TMP116]] to %struct.TT**
// CHECK1-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP117]], align 8
// CHECK1-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 8
// CHECK1-NEXT: [[TMP119:%.*]] = bitcast i8** [[TMP118]] to %struct.TT**
// CHECK1-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP119]], align 8
// CHECK1-NEXT: [[TMP120:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8
// CHECK1-NEXT: store i64 16, i64* [[TMP120]], align 8
// CHECK1-NEXT: [[TMP121:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 8
// CHECK1-NEXT: store i8* null, i8** [[TMP121]], align 8
// CHECK1-NEXT: [[TMP122:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 9
// CHECK1-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to i64*
// CHECK1-NEXT: store i64 [[TMP63]], i64* [[TMP123]], align 8
// CHECK1-NEXT: [[TMP124:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 9
// CHECK1-NEXT: [[TMP125:%.*]] = bitcast i8** [[TMP124]] to i64*
// CHECK1-NEXT: store i64 [[TMP63]], i64* [[TMP125]], align 8
// CHECK1-NEXT: [[TMP126:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 9
// CHECK1-NEXT: store i64 4, i64* [[TMP126]], align 8
// CHECK1-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 9
// CHECK1-NEXT: store i8* null, i8** [[TMP127]], align 8
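// Unlike the earlier regions, the l140 launch below passes the locally
// populated [[DOTOFFLOAD_SIZES]] array instead of a constant global, because
// two of the ten map sizes are only known at run time; the map types are
// still the constant @.offload_maptypes.8.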
// CHECK1-NEXT: [[TMP128:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0
// CHECK1-NEXT: [[TMP129:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0
// CHECK1-NEXT: [[TMP130:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK1-NEXT: [[TMP131:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140.region_id, i32 10, i8** [[TMP128]], i8** [[TMP129]], i64* [[TMP130]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK1-NEXT: [[TMP132:%.*]] = icmp ne i32 [[TMP131]], 0
// CHECK1-NEXT: br i1 [[TMP132]], label [[OMP_OFFLOAD_FAILED23:%.*]], label [[OMP_OFFLOAD_CONT24:%.*]]
// CHECK1: omp_offload.failed23:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i64 [[TMP61]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP63]]) #[[ATTR4]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT24]]
// CHECK1: omp_offload.cont24:
// CHECK1-NEXT: br label [[OMP_IF_END26:%.*]]
// CHECK1: omp_if.else25:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i64 [[TMP61]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP63]]) #[[ATTR4]]
// CHECK1-NEXT: br label [[OMP_IF_END26]]
// CHECK1: omp_if.end26:
// CHECK1-NEXT: [[TMP133:%.*]] = load i32, i32* [[A]], align 4
// CHECK1-NEXT: [[TMP134:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP134]])
// CHECK1-NEXT: ret i32 [[TMP133]]
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96
// CHECK1-SAME: () #[[ATTR2:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK1-NEXT: ret void
//
//
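// The l96 region function above forks @.omp_outlined. with no captured
// arguments; the outlined body below implements the loop with
// __kmpc_for_static_init_4/__kmpc_for_static_fini, i.e. the default static
// schedule.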
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !10
// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !10
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
// CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK1-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
// CHECK1-NEXT: br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK1: .omp.final.then:
// CHECK1-NEXT: store i32 33, i32* [[I]], align 4
// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK1: .omp.final.done:
// CHECK1-NEXT: ret void
//
//
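// @.omp_task_entry. is the proxy-task body for the 'nowait' l96 region: it
// unpacks the kmp_task_t, performs the device launch through
// __tgt_target_teams_nowait_mapper, and calls the host fallback on failure.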
// CHECK1-LABEL: define {{[^@]+}}@.omp_task_entry.
// CHECK1-SAME: (i32 signext [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias [[TMP1:%.*]]) #[[ATTR5:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 8
// CHECK1-NEXT: [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 8
// CHECK1-NEXT: store i32 [[TMP0]], i32* [[DOTADDR]], align 4
// CHECK1-NEXT: store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
// CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
// CHECK1-NEXT: [[TMP9:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META16:![0-9]+]])
// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META19:![0-9]+]])
// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]])
// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META23:![0-9]+]])
// CHECK1-NEXT: store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !25
// CHECK1-NEXT: store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 8, !noalias !25
// CHECK1-NEXT: store i8* null, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !25
// CHECK1-NEXT: store void (i8*, ...)* null, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !25
// CHECK1-NEXT: store i8* [[TMP9]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !25
// CHECK1-NEXT: store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !25
// CHECK1-NEXT: [[TMP10:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !25
// CHECK1-NEXT: [[TMP11:%.*]] = call i32 @__tgt_target_teams_nowait_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 1, i32 0, i32 0, i8* null, i32 0, i8* null) #[[ATTR4]]
// CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
// CHECK1-NEXT: br i1 [[TMP12]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]]
// CHECK1: omp_offload.failed.i:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96() #[[ATTR4]]
// CHECK1-NEXT: br label [[DOTOMP_OUTLINED__1_EXIT]]
// CHECK1: .omp_outlined..1.exit:
// CHECK1-NEXT: ret i32 0
//
//
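// The l101 region function below is host-only (if(target: 0)); it just
// re-packs 'a' and 'k' into i64 slots and forks the parallel loop in
// @.omp_outlined..2.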
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101
// CHECK1-SAME: (i64 [[A:%.*]], i64 [[K:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[K_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[K_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK1-NEXT: store i64 [[K]], i64* [[K_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK1-NEXT: store i32 [[TMP0]], i32* [[CONV1]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* [[K_ADDR]], align 8
// CHECK1-NEXT: store i64 [[TMP2]], i64* [[K_CASTED]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i64, i64* [[K_CASTED]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]])
// CHECK1-NEXT: ret void
//
//
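// In @.omp_outlined..2, linear(k : 3) gives each iteration k = start + iv*3
// (kept in the private [[K1]]), and schedule(dynamic) lowers to the
// __kmpc_dispatch_init_4/__kmpc_dispatch_next_4 loop (schedule value 35).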
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[K:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[K_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTLINEAR_START:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[K1:%.*]] = alloca i64, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK1-NEXT: store i64 [[K]], i64* [[K_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK1-NEXT: [[TMP0:%.*]] = load i64, i64* [[K_ADDR]], align 8
// CHECK1-NEXT: store i64 [[TMP0]], i64* [[DOTLINEAR_START]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 8, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP2]])
// CHECK1-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32 35, i32 0, i32 8, i32 1, i32 1)
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK1: omp.dispatch.cond:
// CHECK1-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP3]], 0
// CHECK1-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK1: omp.dispatch.body:
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !26
// CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[MUL]]
// CHECK1-NEXT: store i32 [[SUB]], i32* [[I]], align 4, !llvm.access.group !26
// CHECK1-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !26
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
// CHECK1-NEXT: [[MUL2:%.*]] = mul nsw i32 [[TMP9]], 3
// CHECK1-NEXT: [[CONV3:%.*]] = sext i32 [[MUL2]] to i64
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP8]], [[CONV3]]
// CHECK1-NEXT: store i64 [[ADD]], i64* [[K1]], align 8, !llvm.access.group !26
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !26
// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK1-NEXT: store i32 [[ADD4]], i32* [[CONV]], align 8, !llvm.access.group !26
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
// CHECK1-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK1-NEXT: store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK1: omp.dispatch.inc:
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK1: omp.dispatch.end:
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
// CHECK1-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK1: .omp.final.then:
// CHECK1-NEXT: store i32 1, i32* [[I]], align 4
// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK1: .omp.final.done:
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
// CHECK1-NEXT: br i1 [[TMP15]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
// CHECK1: .omp.linear.pu:
// CHECK1-NEXT: [[TMP16:%.*]] = load i64, i64* [[K1]], align 8
// CHECK1-NEXT: store i64 [[TMP16]], i64* [[K_ADDR]], align 8
// CHECK1-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]]
// CHECK1: .omp.linear.pu.done:
// CHECK1-NEXT: ret void
//
//
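// In the .omp.linear.pu block above, the last iteration writes the private
// linear copy back to the enclosing k. The l108 region function below
// receives its three scalar captures as i64 values and forks
// @.omp_outlined..3.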
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108
// CHECK1-SAME: (i64 [[AA:%.*]], i64 [[LIN:%.*]], i64 [[A:%.*]]) #[[ATTR2]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[LIN_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[LIN_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK1-NEXT: store i64 [[LIN]], i64* [[LIN_ADDR]], align 8
// CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32*
// CHECK1-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK1-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 8
// CHECK1-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK1-NEXT: store i16 [[TMP0]], i16* [[CONV3]], align 2
// CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV1]], align 8
// CHECK1-NEXT: [[CONV4:%.*]] = bitcast i64* [[LIN_CASTED]] to i32*
// CHECK1-NEXT: store i32 [[TMP2]], i32* [[CONV4]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = load i64, i64* [[LIN_CASTED]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV2]], align 8
// CHECK1-NEXT: [[CONV5:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK1-NEXT: store i32 [[TMP4]], i32* [[CONV5]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]])
// CHECK1-NEXT: ret void
//
//
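// @.omp_outlined..3 has two linear variables (lin and a) whose common step is
// not a constant: it is the result of get_val(), loaded once into
// [[DOTLINEAR_STEP]] before the loop bounds are set up.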
i64* [[A_ADDR]] to i32* 834 // CHECK1-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 8 835 // CHECK1-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* 836 // CHECK1-NEXT: store i16 [[TMP0]], i16* [[CONV3]], align 2 837 // CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[AA_CASTED]], align 8 838 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV1]], align 8 839 // CHECK1-NEXT: [[CONV4:%.*]] = bitcast i64* [[LIN_CASTED]] to i32* 840 // CHECK1-NEXT: store i32 [[TMP2]], i32* [[CONV4]], align 4 841 // CHECK1-NEXT: [[TMP3:%.*]] = load i64, i64* [[LIN_CASTED]], align 8 842 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV2]], align 8 843 // CHECK1-NEXT: [[CONV5:%.*]] = bitcast i64* [[A_CASTED]] to i32* 844 // CHECK1-NEXT: store i32 [[TMP4]], i32* [[CONV5]], align 4 845 // CHECK1-NEXT: [[TMP5:%.*]] = load i64, i64* [[A_CASTED]], align 8 846 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]]) 847 // CHECK1-NEXT: ret void 848 // 849 // 850 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..3 851 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[AA:%.*]], i64 [[LIN:%.*]], i64 [[A:%.*]]) #[[ATTR3]] { 852 // CHECK1-NEXT: entry: 853 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 854 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 855 // CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 856 // CHECK1-NEXT: [[LIN_ADDR:%.*]] = alloca i64, align 8 857 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 858 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 859 // CHECK1-NEXT: [[TMP:%.*]] = alloca i64, align 8 860 // CHECK1-NEXT: [[DOTLINEAR_START:%.*]] = alloca i32, align 4 861 // CHECK1-NEXT: [[DOTLINEAR_START3:%.*]] = alloca i32, align 4 862 // CHECK1-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8 863 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 864 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 865 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 866 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 867 // CHECK1-NEXT: [[IT:%.*]] = alloca i64, align 8 868 // CHECK1-NEXT: [[LIN4:%.*]] = alloca i32, align 4 869 // CHECK1-NEXT: [[A5:%.*]] = alloca i32, align 4 870 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 871 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 872 // CHECK1-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 873 // CHECK1-NEXT: store i64 [[LIN]], i64* [[LIN_ADDR]], align 8 874 // CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 875 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 876 // CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32* 877 // CHECK1-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32* 878 // CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV1]], align 8 879 // CHECK1-NEXT: store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4 880 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV2]], align 8 881 // CHECK1-NEXT: store i32 [[TMP1]], i32* [[DOTLINEAR_START3]], align 4 882 // CHECK1-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv() 883 // CHECK1-NEXT: store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8 884 // CHECK1-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 885 // CHECK1-NEXT: store i64 3, 
i64* [[DOTOMP_UB]], align 8 886 // CHECK1-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 887 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 888 // CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 889 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 890 // CHECK1-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3]], i32 [[TMP3]]) 891 // CHECK1-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) 892 // CHECK1-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 893 // CHECK1-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3 894 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 895 // CHECK1: cond.true: 896 // CHECK1-NEXT: br label [[COND_END:%.*]] 897 // CHECK1: cond.false: 898 // CHECK1-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 899 // CHECK1-NEXT: br label [[COND_END]] 900 // CHECK1: cond.end: 901 // CHECK1-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 902 // CHECK1-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 903 // CHECK1-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 904 // CHECK1-NEXT: store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8 905 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 906 // CHECK1: omp.inner.for.cond: 907 // CHECK1-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29 908 // CHECK1-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !29 909 // CHECK1-NEXT: [[CMP6:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]] 910 // CHECK1-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 911 // CHECK1: omp.inner.for.body: 912 // CHECK1-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29 913 // CHECK1-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400 914 // CHECK1-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] 915 // CHECK1-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !29 916 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4, !llvm.access.group !29 917 // CHECK1-NEXT: [[CONV7:%.*]] = sext i32 [[TMP10]] to i64 918 // CHECK1-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29 919 // CHECK1-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !29 920 // CHECK1-NEXT: [[MUL8:%.*]] = mul i64 [[TMP11]], [[TMP12]] 921 // CHECK1-NEXT: [[ADD:%.*]] = add i64 [[CONV7]], [[MUL8]] 922 // CHECK1-NEXT: [[CONV9:%.*]] = trunc i64 [[ADD]] to i32 923 // CHECK1-NEXT: store i32 [[CONV9]], i32* [[LIN4]], align 4, !llvm.access.group !29 924 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START3]], align 4, !llvm.access.group !29 925 // CHECK1-NEXT: [[CONV10:%.*]] = sext i32 [[TMP13]] to i64 926 // CHECK1-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29 927 // CHECK1-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !29 928 // CHECK1-NEXT: [[MUL11:%.*]] = mul i64 [[TMP14]], [[TMP15]] 929 // CHECK1-NEXT: [[ADD12:%.*]] = add i64 [[CONV10]], [[MUL11]] 930 // CHECK1-NEXT: [[CONV13:%.*]] = trunc i64 [[ADD12]] to i32 931 // CHECK1-NEXT: store i32 [[CONV13]], i32* [[A5]], align 4, !llvm.access.group !29 932 // CHECK1-NEXT: [[TMP16:%.*]] = load i16, i16* [[CONV]], align 8, !llvm.access.group !29 933 // 
CHECK1-NEXT: [[CONV14:%.*]] = sext i16 [[TMP16]] to i32 934 // CHECK1-NEXT: [[ADD15:%.*]] = add nsw i32 [[CONV14]], 1 935 // CHECK1-NEXT: [[CONV16:%.*]] = trunc i32 [[ADD15]] to i16 936 // CHECK1-NEXT: store i16 [[CONV16]], i16* [[CONV]], align 8, !llvm.access.group !29 937 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 938 // CHECK1: omp.body.continue: 939 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 940 // CHECK1: omp.inner.for.inc: 941 // CHECK1-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29 942 // CHECK1-NEXT: [[ADD17:%.*]] = add i64 [[TMP17]], 1 943 // CHECK1-NEXT: store i64 [[ADD17]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29 944 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]] 945 // CHECK1: omp.inner.for.end: 946 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 947 // CHECK1: omp.loop.exit: 948 // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 949 // CHECK1-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 950 // CHECK1-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0 951 // CHECK1-NEXT: br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 952 // CHECK1: .omp.final.then: 953 // CHECK1-NEXT: store i64 400, i64* [[IT]], align 8 954 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] 955 // CHECK1: .omp.final.done: 956 // CHECK1-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 957 // CHECK1-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0 958 // CHECK1-NEXT: br i1 [[TMP21]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]] 959 // CHECK1: .omp.linear.pu: 960 // CHECK1-NEXT: [[TMP22:%.*]] = load i32, i32* [[LIN4]], align 4 961 // CHECK1-NEXT: store i32 [[TMP22]], i32* [[CONV1]], align 8 962 // CHECK1-NEXT: [[TMP23:%.*]] = load i32, i32* [[A5]], align 4 963 // CHECK1-NEXT: store i32 [[TMP23]], i32* [[CONV2]], align 8 964 // CHECK1-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] 965 // CHECK1: .omp.linear.pu.done: 966 // CHECK1-NEXT: ret void 967 // 968 // 969 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116 970 // CHECK1-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]]) #[[ATTR2]] { 971 // CHECK1-NEXT: entry: 972 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 973 // CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 974 // CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 975 // CHECK1-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 976 // CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 977 // CHECK1-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 978 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 979 // CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 980 // CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 8 981 // CHECK1-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32* 982 // CHECK1-NEXT: store i32 [[TMP0]], i32* [[CONV2]], align 4 983 // CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8 984 // CHECK1-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 8 985 // CHECK1-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* 986 // CHECK1-NEXT: store i16 [[TMP2]], i16* [[CONV3]], align 2 987 // CHECK1-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8 988 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]]) 989 // CHECK1-NEXT: ret void 990 // 991 // 992 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..4 993 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]]) #[[ATTR3]] { 994 // CHECK1-NEXT: entry: 995 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 996 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 997 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 998 // CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 999 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 1000 // CHECK1-NEXT: [[TMP:%.*]] = alloca i16, align 2 1001 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 1002 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 1003 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 1004 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 1005 // CHECK1-NEXT: [[IT:%.*]] = alloca i16, align 2 1006 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 1007 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 1008 // CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 1009 // CHECK1-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 1010 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 1011 // CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 1012 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 1013 // CHECK1-NEXT: store i32 3, i32* [[DOTOMP_UB]], align 4 1014 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 1015 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 1016 // CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 1017 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 1018 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 1019 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1020 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3 1021 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 1022 // CHECK1: cond.true: 1023 // CHECK1-NEXT: br label [[COND_END:%.*]] 1024 // CHECK1: cond.false: 1025 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1026 // CHECK1-NEXT: br label [[COND_END]] 1027 // CHECK1: cond.end: 1028 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 1029 // CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 1030 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 1031 // CHECK1-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 1032 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 1033 // CHECK1: omp.inner.for.cond: 1034 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32 1035 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !32 1036 // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 1037 // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 1038 // CHECK1: omp.inner.for.body: 1039 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, 
!llvm.access.group !32 1040 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4 1041 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]] 1042 // CHECK1-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD]] to i16 1043 // CHECK1-NEXT: store i16 [[CONV3]], i16* [[IT]], align 2, !llvm.access.group !32 1044 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !32 1045 // CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], 1 1046 // CHECK1-NEXT: store i32 [[ADD4]], i32* [[CONV]], align 8, !llvm.access.group !32 1047 // CHECK1-NEXT: [[TMP9:%.*]] = load i16, i16* [[CONV1]], align 8, !llvm.access.group !32 1048 // CHECK1-NEXT: [[CONV5:%.*]] = sext i16 [[TMP9]] to i32 1049 // CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1 1050 // CHECK1-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16 1051 // CHECK1-NEXT: store i16 [[CONV7]], i16* [[CONV1]], align 8, !llvm.access.group !32 1052 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 1053 // CHECK1: omp.body.continue: 1054 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 1055 // CHECK1: omp.inner.for.inc: 1056 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32 1057 // CHECK1-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP10]], 1 1058 // CHECK1-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32 1059 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]] 1060 // CHECK1: omp.inner.for.end: 1061 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 1062 // CHECK1: omp.loop.exit: 1063 // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 1064 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 1065 // CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 1066 // CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 1067 // CHECK1: .omp.final.then: 1068 // CHECK1-NEXT: store i16 22, i16* [[IT]], align 2 1069 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] 1070 // CHECK1: .omp.final.done: 1071 // CHECK1-NEXT: ret void 1072 // 1073 // 1074 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140 1075 // CHECK1-SAME: (i64 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { 1076 // CHECK1-NEXT: entry: 1077 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 1078 // CHECK1-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8 1079 // CHECK1-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 1080 // CHECK1-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8 1081 // CHECK1-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8 1082 // CHECK1-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8 1083 // CHECK1-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8 1084 // CHECK1-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8 1085 // CHECK1-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8 1086 // CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 1087 // CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 1088 // CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 1089 // CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 1090 // CHECK1-NEXT: store [10 x float]* [[B]], [10 x 
float]** [[B_ADDR]], align 8 1091 // CHECK1-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 1092 // CHECK1-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8 1093 // CHECK1-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8 1094 // CHECK1-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8 1095 // CHECK1-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8 1096 // CHECK1-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8 1097 // CHECK1-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8 1098 // CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 1099 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 1100 // CHECK1-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8 1101 // CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 1102 // CHECK1-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8 1103 // CHECK1-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8 1104 // CHECK1-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8 1105 // CHECK1-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8 1106 // CHECK1-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8 1107 // CHECK1-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8 1108 // CHECK1-NEXT: [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32* 1109 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 8 1110 // CHECK1-NEXT: [[CONV6:%.*]] = bitcast i64* [[A_CASTED]] to i32* 1111 // CHECK1-NEXT: store i32 [[TMP8]], i32* [[CONV6]], align 4 1112 // CHECK1-NEXT: [[TMP9:%.*]] = load i64, i64* [[A_CASTED]], align 8 1113 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[CONV5]], align 8 1114 // CHECK1-NEXT: [[CONV7:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* 1115 // CHECK1-NEXT: store i32 [[TMP10]], i32* [[CONV7]], align 4 1116 // CHECK1-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 1117 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, [10 x float]*, i64, float*, [5 x [10 x double]]*, i64, i64, double*, %struct.TT*, i64)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP9]], [10 x float]* [[TMP0]], i64 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i64 [[TMP11]]) 1118 // CHECK1-NEXT: ret void 1119 // 1120 // 1121 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..7 1122 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] { 1123 // CHECK1-NEXT: entry: 1124 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 1125 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 1126 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 1127 // CHECK1-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8 1128 // CHECK1-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 1129 // CHECK1-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8 1130 // CHECK1-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8 1131 // CHECK1-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8 1132 // CHECK1-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8 1133 // CHECK1-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8 1134 // CHECK1-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8 1135 // CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 1136 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 1137 // CHECK1-NEXT: [[TMP:%.*]] = alloca i8, align 1 1138 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 1139 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 1140 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 1141 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 1142 // CHECK1-NEXT: [[IT:%.*]] = alloca i8, align 1 1143 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 1144 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 1145 // CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 1146 // CHECK1-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8 1147 // CHECK1-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 1148 // CHECK1-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8 1149 // CHECK1-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8 1150 // CHECK1-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8 1151 // CHECK1-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8 1152 // CHECK1-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8 1153 // CHECK1-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8 1154 // CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 1155 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 1156 // CHECK1-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8 1157 // CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 1158 // CHECK1-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8 1159 // CHECK1-NEXT: 
[[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8 1160 // CHECK1-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8 1161 // CHECK1-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8 1162 // CHECK1-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8 1163 // CHECK1-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8 1164 // CHECK1-NEXT: [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32* 1165 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 1166 // CHECK1-NEXT: store i32 25, i32* [[DOTOMP_UB]], align 4 1167 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 1168 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 1169 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV5]], align 8 1170 // CHECK1-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 1171 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4 1172 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]]) 1173 // CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]] 1174 // CHECK1: omp.dispatch.cond: 1175 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1176 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25 1177 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 1178 // CHECK1: cond.true: 1179 // CHECK1-NEXT: br label [[COND_END:%.*]] 1180 // CHECK1: cond.false: 1181 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1182 // CHECK1-NEXT: br label [[COND_END]] 1183 // CHECK1: cond.end: 1184 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] 1185 // CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 1186 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 1187 // CHECK1-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4 1188 // CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 1189 // CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1190 // CHECK1-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] 1191 // CHECK1-NEXT: br i1 [[CMP6]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] 1192 // CHECK1: omp.dispatch.body: 1193 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 1194 // CHECK1: omp.inner.for.cond: 1195 // CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35 1196 // CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !35 1197 // CHECK1-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]] 1198 // CHECK1-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 1199 // CHECK1: omp.inner.for.body: 1200 // CHECK1-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35 1201 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1 1202 // CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 122, [[MUL]] 1203 // CHECK1-NEXT: [[CONV8:%.*]] = trunc i32 [[SUB]] to i8 1204 // CHECK1-NEXT: store i8 [[CONV8]], i8* [[IT]], align 1, !llvm.access.group !35 1205 // CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !35 1206 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1 1207 // CHECK1-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8, !llvm.access.group !35 1208 // CHECK1-NEXT: 
[[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 2 1209 // CHECK1-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !35 1210 // CHECK1-NEXT: [[CONV9:%.*]] = fpext float [[TMP20]] to double 1211 // CHECK1-NEXT: [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00 1212 // CHECK1-NEXT: [[CONV11:%.*]] = fptrunc double [[ADD10]] to float 1213 // CHECK1-NEXT: store float [[CONV11]], float* [[ARRAYIDX]], align 4, !llvm.access.group !35 1214 // CHECK1-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds float, float* [[TMP2]], i64 3 1215 // CHECK1-NEXT: [[TMP21:%.*]] = load float, float* [[ARRAYIDX12]], align 4, !llvm.access.group !35 1216 // CHECK1-NEXT: [[CONV13:%.*]] = fpext float [[TMP21]] to double 1217 // CHECK1-NEXT: [[ADD14:%.*]] = fadd double [[CONV13]], 1.000000e+00 1218 // CHECK1-NEXT: [[CONV15:%.*]] = fptrunc double [[ADD14]] to float 1219 // CHECK1-NEXT: store float [[CONV15]], float* [[ARRAYIDX12]], align 4, !llvm.access.group !35 1220 // CHECK1-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i64 0, i64 1 1221 // CHECK1-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX16]], i64 0, i64 2 1222 // CHECK1-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX17]], align 8, !llvm.access.group !35 1223 // CHECK1-NEXT: [[ADD18:%.*]] = fadd double [[TMP22]], 1.000000e+00 1224 // CHECK1-NEXT: store double [[ADD18]], double* [[ARRAYIDX17]], align 8, !llvm.access.group !35 1225 // CHECK1-NEXT: [[TMP23:%.*]] = mul nsw i64 1, [[TMP5]] 1226 // CHECK1-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds double, double* [[TMP6]], i64 [[TMP23]] 1227 // CHECK1-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX19]], i64 3 1228 // CHECK1-NEXT: [[TMP24:%.*]] = load double, double* [[ARRAYIDX20]], align 8, !llvm.access.group !35 1229 // CHECK1-NEXT: [[ADD21:%.*]] = fadd double [[TMP24]], 1.000000e+00 1230 // CHECK1-NEXT: store double [[ADD21]], double* [[ARRAYIDX20]], align 8, !llvm.access.group !35 1231 // CHECK1-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0 1232 // CHECK1-NEXT: [[TMP25:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group !35 1233 // CHECK1-NEXT: [[ADD22:%.*]] = add nsw i64 [[TMP25]], 1 1234 // CHECK1-NEXT: store i64 [[ADD22]], i64* [[X]], align 8, !llvm.access.group !35 1235 // CHECK1-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1 1236 // CHECK1-NEXT: [[TMP26:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group !35 1237 // CHECK1-NEXT: [[CONV23:%.*]] = sext i8 [[TMP26]] to i32 1238 // CHECK1-NEXT: [[ADD24:%.*]] = add nsw i32 [[CONV23]], 1 1239 // CHECK1-NEXT: [[CONV25:%.*]] = trunc i32 [[ADD24]] to i8 1240 // CHECK1-NEXT: store i8 [[CONV25]], i8* [[Y]], align 8, !llvm.access.group !35 1241 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 1242 // CHECK1: omp.body.continue: 1243 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 1244 // CHECK1: omp.inner.for.inc: 1245 // CHECK1-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35 1246 // CHECK1-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP27]], 1 1247 // CHECK1-NEXT: store i32 [[ADD26]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35 1248 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]] 1249 // CHECK1: omp.inner.for.end: 1250 // CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]] 1251 // CHECK1: 
omp.dispatch.inc: 1252 // CHECK1-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 1253 // CHECK1-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 1254 // CHECK1-NEXT: [[ADD27:%.*]] = add nsw i32 [[TMP28]], [[TMP29]] 1255 // CHECK1-NEXT: store i32 [[ADD27]], i32* [[DOTOMP_LB]], align 4 1256 // CHECK1-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1257 // CHECK1-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 1258 // CHECK1-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP30]], [[TMP31]] 1259 // CHECK1-NEXT: store i32 [[ADD28]], i32* [[DOTOMP_UB]], align 4 1260 // CHECK1-NEXT: br label [[OMP_DISPATCH_COND]] 1261 // CHECK1: omp.dispatch.end: 1262 // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]]) 1263 // CHECK1-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 1264 // CHECK1-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0 1265 // CHECK1-NEXT: br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 1266 // CHECK1: .omp.final.then: 1267 // CHECK1-NEXT: store i8 96, i8* [[IT]], align 1 1268 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] 1269 // CHECK1: .omp.final.done: 1270 // CHECK1-NEXT: ret void 1271 // 1272 // 1273 // CHECK1-LABEL: define {{[^@]+}}@_Z3bari 1274 // CHECK1-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] { 1275 // CHECK1-NEXT: entry: 1276 // CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 1277 // CHECK1-NEXT: [[A:%.*]] = alloca i32, align 4 1278 // CHECK1-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8 1279 // CHECK1-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 1280 // CHECK1-NEXT: store i32 0, i32* [[A]], align 4 1281 // CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 1282 // CHECK1-NEXT: [[CALL:%.*]] = call signext i32 @_Z3fooi(i32 signext [[TMP0]]) 1283 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4 1284 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] 1285 // CHECK1-NEXT: store i32 [[ADD]], i32* [[A]], align 4 1286 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 1287 // CHECK1-NEXT: [[CALL1:%.*]] = call signext i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 8 dereferenceable(8) [[S]], i32 signext [[TMP2]]) 1288 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 1289 // CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] 1290 // CHECK1-NEXT: store i32 [[ADD2]], i32* [[A]], align 4 1291 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 1292 // CHECK1-NEXT: [[CALL3:%.*]] = call signext i32 @_ZL7fstatici(i32 signext [[TMP4]]) 1293 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4 1294 // CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] 1295 // CHECK1-NEXT: store i32 [[ADD4]], i32* [[A]], align 4 1296 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4 1297 // CHECK1-NEXT: [[CALL5:%.*]] = call signext i32 @_Z9ftemplateIiET_i(i32 signext [[TMP6]]) 1298 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4 1299 // CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]] 1300 // CHECK1-NEXT: store i32 [[ADD6]], i32* [[A]], align 4 1301 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4 1302 // CHECK1-NEXT: ret i32 [[TMP8]] 1303 // 1304 // 1305 // CHECK1-LABEL: define {{[^@]+}}@_ZN2S12r1Ei 1306 // CHECK1-SAME: (%struct.S1* nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 signext [[N:%.*]]) #[[ATTR0]] comdat align 2 { 1307 // CHECK1-NEXT: entry: 1308 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, 
align 8 1309 // CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 1310 // CHECK1-NEXT: [[B:%.*]] = alloca i32, align 4 1311 // CHECK1-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8 1312 // CHECK1-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8 1313 // CHECK1-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8 1314 // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8 1315 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8 1316 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8 1317 // CHECK1-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8 1318 // CHECK1-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 1319 // CHECK1-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 1320 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 1321 // CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 1322 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 1323 // CHECK1-NEXT: store i32 [[ADD]], i32* [[B]], align 4 1324 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 1325 // CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 1326 // CHECK1-NEXT: [[TMP3:%.*]] = call i8* @llvm.stacksave() 1327 // CHECK1-NEXT: store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8 1328 // CHECK1-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]] 1329 // CHECK1-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2 1330 // CHECK1-NEXT: store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8 1331 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[B]], align 4 1332 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[B_CASTED]] to i32* 1333 // CHECK1-NEXT: store i32 [[TMP5]], i32* [[CONV]], align 4 1334 // CHECK1-NEXT: [[TMP6:%.*]] = load i64, i64* [[B_CASTED]], align 8 1335 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[N_ADDR]], align 4 1336 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP7]], 60 1337 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 1338 // CHECK1: omp_if.then: 1339 // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0 1340 // CHECK1-NEXT: [[TMP8:%.*]] = mul nuw i64 2, [[TMP2]] 1341 // CHECK1-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 2 1342 // CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 1343 // CHECK1-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to %struct.S1** 1344 // CHECK1-NEXT: store %struct.S1* [[THIS1]], %struct.S1** [[TMP11]], align 8 1345 // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 1346 // CHECK1-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double** 1347 // CHECK1-NEXT: store double* [[A]], double** [[TMP13]], align 8 1348 // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 1349 // CHECK1-NEXT: store i64 8, i64* [[TMP14]], align 8 1350 // CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 1351 // CHECK1-NEXT: store i8* null, i8** [[TMP15]], align 8 1352 // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 1353 // CHECK1-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64* 1354 // CHECK1-NEXT: store i64 [[TMP6]], i64* [[TMP17]], align 8 1355 // CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 1356 // CHECK1-NEXT: [[TMP19:%.*]] = 
bitcast i8** [[TMP18]] to i64* 1357 // CHECK1-NEXT: store i64 [[TMP6]], i64* [[TMP19]], align 8 1358 // CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 1359 // CHECK1-NEXT: store i64 4, i64* [[TMP20]], align 8 1360 // CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 1361 // CHECK1-NEXT: store i8* null, i8** [[TMP21]], align 8 1362 // CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 1363 // CHECK1-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64* 1364 // CHECK1-NEXT: store i64 2, i64* [[TMP23]], align 8 1365 // CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 1366 // CHECK1-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64* 1367 // CHECK1-NEXT: store i64 2, i64* [[TMP25]], align 8 1368 // CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 1369 // CHECK1-NEXT: store i64 8, i64* [[TMP26]], align 8 1370 // CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 1371 // CHECK1-NEXT: store i8* null, i8** [[TMP27]], align 8 1372 // CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 1373 // CHECK1-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64* 1374 // CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP29]], align 8 1375 // CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 1376 // CHECK1-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64* 1377 // CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP31]], align 8 1378 // CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 1379 // CHECK1-NEXT: store i64 8, i64* [[TMP32]], align 8 1380 // CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 1381 // CHECK1-NEXT: store i8* null, i8** [[TMP33]], align 8 1382 // CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 1383 // CHECK1-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16** 1384 // CHECK1-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 8 1385 // CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 1386 // CHECK1-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16** 1387 // CHECK1-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 8 1388 // CHECK1-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 1389 // CHECK1-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 8 1390 // CHECK1-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 1391 // CHECK1-NEXT: store i8* null, i8** [[TMP39]], align 8 1392 // CHECK1-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 1393 // CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 1394 // CHECK1-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 1395 // CHECK1-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* 
@.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) 1396 // CHECK1-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 1397 // CHECK1-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 1398 // CHECK1: omp_offload.failed: 1399 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR4]] 1400 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] 1401 // CHECK1: omp_offload.cont: 1402 // CHECK1-NEXT: br label [[OMP_IF_END:%.*]] 1403 // CHECK1: omp_if.else: 1404 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR4]] 1405 // CHECK1-NEXT: br label [[OMP_IF_END]] 1406 // CHECK1: omp_if.end: 1407 // CHECK1-NEXT: [[TMP45:%.*]] = mul nsw i64 1, [[TMP2]] 1408 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP45]] 1409 // CHECK1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1 1410 // CHECK1-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 1411 // CHECK1-NEXT: [[CONV3:%.*]] = sext i16 [[TMP46]] to i32 1412 // CHECK1-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4 1413 // CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP47]] 1414 // CHECK1-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 1415 // CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP48]]) 1416 // CHECK1-NEXT: ret i32 [[ADD4]] 1417 // 1418 // 1419 // CHECK1-LABEL: define {{[^@]+}}@_ZL7fstatici 1420 // CHECK1-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] { 1421 // CHECK1-NEXT: entry: 1422 // CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 1423 // CHECK1-NEXT: [[A:%.*]] = alloca i32, align 4 1424 // CHECK1-NEXT: [[AA:%.*]] = alloca i16, align 2 1425 // CHECK1-NEXT: [[AAA:%.*]] = alloca i8, align 1 1426 // CHECK1-NEXT: [[B:%.*]] = alloca [10 x i32], align 4 1427 // CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 1428 // CHECK1-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 1429 // CHECK1-NEXT: [[AAA_CASTED:%.*]] = alloca i64, align 8 1430 // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 8 1431 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 8 1432 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 8 1433 // CHECK1-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 1434 // CHECK1-NEXT: store i32 0, i32* [[A]], align 4 1435 // CHECK1-NEXT: store i16 0, i16* [[AA]], align 2 1436 // CHECK1-NEXT: store i8 0, i8* [[AAA]], align 1 1437 // CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4 1438 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32* 1439 // CHECK1-NEXT: store i32 [[TMP0]], i32* [[CONV]], align 4 1440 // CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8 1441 // CHECK1-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2 1442 // CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16* 1443 // CHECK1-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2 1444 // CHECK1-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8 1445 // CHECK1-NEXT: [[TMP4:%.*]] = load i8, i8* [[AAA]], align 1 1446 // CHECK1-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_CASTED]] to i8* 1447 // CHECK1-NEXT: store i8 [[TMP4]], i8* [[CONV2]], 
align 1 1448 // CHECK1-NEXT: [[TMP5:%.*]] = load i64, i64* [[AAA_CASTED]], align 8 1449 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4 1450 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 50 1451 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 1452 // CHECK1: omp_if.then: 1453 // CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 1454 // CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64* 1455 // CHECK1-NEXT: store i64 [[TMP1]], i64* [[TMP8]], align 8 1456 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 1457 // CHECK1-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i64* 1458 // CHECK1-NEXT: store i64 [[TMP1]], i64* [[TMP10]], align 8 1459 // CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 1460 // CHECK1-NEXT: store i8* null, i8** [[TMP11]], align 8 1461 // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 1462 // CHECK1-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64* 1463 // CHECK1-NEXT: store i64 [[TMP3]], i64* [[TMP13]], align 8 1464 // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 1465 // CHECK1-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* 1466 // CHECK1-NEXT: store i64 [[TMP3]], i64* [[TMP15]], align 8 1467 // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 1468 // CHECK1-NEXT: store i8* null, i8** [[TMP16]], align 8 1469 // CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 1470 // CHECK1-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64* 1471 // CHECK1-NEXT: store i64 [[TMP5]], i64* [[TMP18]], align 8 1472 // CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 1473 // CHECK1-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64* 1474 // CHECK1-NEXT: store i64 [[TMP5]], i64* [[TMP20]], align 8 1475 // CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 1476 // CHECK1-NEXT: store i8* null, i8** [[TMP21]], align 8 1477 // CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 1478 // CHECK1-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to [10 x i32]** 1479 // CHECK1-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP23]], align 8 1480 // CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 1481 // CHECK1-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to [10 x i32]** 1482 // CHECK1-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP25]], align 8 1483 // CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 1484 // CHECK1-NEXT: store i8* null, i8** [[TMP26]], align 8 1485 // CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 1486 // CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 1487 // CHECK1-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195.region_id, i32 4, i8** [[TMP27]], i8** 
[[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) 1488 // CHECK1-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0 1489 // CHECK1-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 1490 // CHECK1: omp_offload.failed: 1491 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]] 1492 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] 1493 // CHECK1: omp_offload.cont: 1494 // CHECK1-NEXT: br label [[OMP_IF_END:%.*]] 1495 // CHECK1: omp_if.else: 1496 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]] 1497 // CHECK1-NEXT: br label [[OMP_IF_END]] 1498 // CHECK1: omp_if.end: 1499 // CHECK1-NEXT: [[TMP31:%.*]] = load i32, i32* [[A]], align 4 1500 // CHECK1-NEXT: ret i32 [[TMP31]] 1501 // 1502 // 1503 // CHECK1-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i 1504 // CHECK1-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] comdat { 1505 // CHECK1-NEXT: entry: 1506 // CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 1507 // CHECK1-NEXT: [[A:%.*]] = alloca i32, align 4 1508 // CHECK1-NEXT: [[AA:%.*]] = alloca i16, align 2 1509 // CHECK1-NEXT: [[B:%.*]] = alloca [10 x i32], align 4 1510 // CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 1511 // CHECK1-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 1512 // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 1513 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 1514 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 1515 // CHECK1-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 1516 // CHECK1-NEXT: store i32 0, i32* [[A]], align 4 1517 // CHECK1-NEXT: store i16 0, i16* [[AA]], align 2 1518 // CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4 1519 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32* 1520 // CHECK1-NEXT: store i32 [[TMP0]], i32* [[CONV]], align 4 1521 // CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8 1522 // CHECK1-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2 1523 // CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16* 1524 // CHECK1-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2 1525 // CHECK1-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8 1526 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 1527 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40 1528 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 1529 // CHECK1: omp_if.then: 1530 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 1531 // CHECK1-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i64* 1532 // CHECK1-NEXT: store i64 [[TMP1]], i64* [[TMP6]], align 8 1533 // CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 1534 // CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64* 1535 // CHECK1-NEXT: store i64 [[TMP1]], i64* [[TMP8]], align 8 1536 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 1537 // CHECK1-NEXT: store i8* null, i8** [[TMP9]], align 8 1538 // CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x 
i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 1539 // CHECK1-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i64* 1540 // CHECK1-NEXT: store i64 [[TMP3]], i64* [[TMP11]], align 8 1541 // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 1542 // CHECK1-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64* 1543 // CHECK1-NEXT: store i64 [[TMP3]], i64* [[TMP13]], align 8 1544 // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 1545 // CHECK1-NEXT: store i8* null, i8** [[TMP14]], align 8 1546 // CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 1547 // CHECK1-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to [10 x i32]** 1548 // CHECK1-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP16]], align 8 1549 // CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 1550 // CHECK1-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [10 x i32]** 1551 // CHECK1-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP18]], align 8 1552 // CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 1553 // CHECK1-NEXT: store i8* null, i8** [[TMP19]], align 8 1554 // CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 1555 // CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 1556 // CHECK1-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.15, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) 1557 // CHECK1-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 1558 // CHECK1-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 1559 // CHECK1: omp_offload.failed: 1560 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178(i64 [[TMP1]], i64 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]] 1561 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] 1562 // CHECK1: omp_offload.cont: 1563 // CHECK1-NEXT: br label [[OMP_IF_END:%.*]] 1564 // CHECK1: omp_if.else: 1565 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178(i64 [[TMP1]], i64 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]] 1566 // CHECK1-NEXT: br label [[OMP_IF_END]] 1567 // CHECK1: omp_if.end: 1568 // CHECK1-NEXT: [[TMP24:%.*]] = load i32, i32* [[A]], align 4 1569 // CHECK1-NEXT: ret i32 [[TMP24]] 1570 // 1571 // 1572 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216 1573 // CHECK1-SAME: (%struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] { 1574 // CHECK1-NEXT: entry: 1575 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 1576 // CHECK1-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8 1577 // CHECK1-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 1578 // CHECK1-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8 1579 // CHECK1-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 8 1580 // CHECK1-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8 1581 // CHECK1-NEXT: 
store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 1582 // CHECK1-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8 1583 // CHECK1-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 1584 // CHECK1-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8 1585 // CHECK1-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8 1586 // CHECK1-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 1587 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32* 1588 // CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 1589 // CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8 1590 // CHECK1-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8 1591 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV]], align 8 1592 // CHECK1-NEXT: [[CONV3:%.*]] = bitcast i64* [[B_CASTED]] to i32* 1593 // CHECK1-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 1594 // CHECK1-NEXT: [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8 1595 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]]) 1596 // CHECK1-NEXT: ret void 1597 // 1598 // 1599 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..9 1600 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR3]] { 1601 // CHECK1-NEXT: entry: 1602 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 1603 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 1604 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 1605 // CHECK1-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8 1606 // CHECK1-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 1607 // CHECK1-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8 1608 // CHECK1-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 8 1609 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 1610 // CHECK1-NEXT: [[TMP:%.*]] = alloca i64, align 8 1611 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 1612 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 1613 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 1614 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 1615 // CHECK1-NEXT: [[IT:%.*]] = alloca i64, align 8 1616 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 1617 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 1618 // CHECK1-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 1619 // CHECK1-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8 1620 // CHECK1-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 1621 // CHECK1-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8 1622 // CHECK1-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8 1623 // CHECK1-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 1624 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32* 1625 // CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 1626 // CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8 1627 // CHECK1-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8 1628 // CHECK1-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 1629 // 
CHECK1-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8 1630 // CHECK1-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 1631 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 1632 // CHECK1-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 1633 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4 1634 // CHECK1-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) 1635 // CHECK1-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 1636 // CHECK1-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP6]], 3 1637 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 1638 // CHECK1: cond.true: 1639 // CHECK1-NEXT: br label [[COND_END:%.*]] 1640 // CHECK1: cond.false: 1641 // CHECK1-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 1642 // CHECK1-NEXT: br label [[COND_END]] 1643 // CHECK1: cond.end: 1644 // CHECK1-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] 1645 // CHECK1-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 1646 // CHECK1-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 1647 // CHECK1-NEXT: store i64 [[TMP8]], i64* [[DOTOMP_IV]], align 8 1648 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 1649 // CHECK1: omp.inner.for.cond: 1650 // CHECK1-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !38 1651 // CHECK1-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !38 1652 // CHECK1-NEXT: [[CMP3:%.*]] = icmp ule i64 [[TMP9]], [[TMP10]] 1653 // CHECK1-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 1654 // CHECK1: omp.inner.for.body: 1655 // CHECK1-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !38 1656 // CHECK1-NEXT: [[MUL:%.*]] = mul i64 [[TMP11]], 400 1657 // CHECK1-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] 1658 // CHECK1-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !38 1659 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !38 1660 // CHECK1-NEXT: [[CONV4:%.*]] = sitofp i32 [[TMP12]] to double 1661 // CHECK1-NEXT: [[ADD:%.*]] = fadd double [[CONV4]], 1.500000e+00 1662 // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0 1663 // CHECK1-NEXT: store double [[ADD]], double* [[A]], align 8, !llvm.access.group !38 1664 // CHECK1-NEXT: [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0 1665 // CHECK1-NEXT: [[TMP13:%.*]] = load double, double* [[A5]], align 8, !llvm.access.group !38 1666 // CHECK1-NEXT: [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00 1667 // CHECK1-NEXT: store double [[INC]], double* [[A5]], align 8, !llvm.access.group !38 1668 // CHECK1-NEXT: [[CONV6:%.*]] = fptosi double [[INC]] to i16 1669 // CHECK1-NEXT: [[TMP14:%.*]] = mul nsw i64 1, [[TMP2]] 1670 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP14]] 1671 // CHECK1-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1 1672 // CHECK1-NEXT: store i16 [[CONV6]], i16* [[ARRAYIDX7]], align 2, !llvm.access.group !38 1673 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 1674 // CHECK1: omp.body.continue: 1675 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 1676 // CHECK1: omp.inner.for.inc: 1677 // CHECK1-NEXT: [[TMP15:%.*]] = 
load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !38 1678 // CHECK1-NEXT: [[ADD8:%.*]] = add i64 [[TMP15]], 1 1679 // CHECK1-NEXT: store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !38 1680 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP39:![0-9]+]] 1681 // CHECK1: omp.inner.for.end: 1682 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 1683 // CHECK1: omp.loop.exit: 1684 // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]]) 1685 // CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 1686 // CHECK1-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 1687 // CHECK1-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 1688 // CHECK1: .omp.final.then: 1689 // CHECK1-NEXT: store i64 400, i64* [[IT]], align 8 1690 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] 1691 // CHECK1: .omp.final.done: 1692 // CHECK1-NEXT: ret void 1693 // 1694 // 1695 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195 1696 // CHECK1-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] { 1697 // CHECK1-NEXT: entry: 1698 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 1699 // CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 1700 // CHECK1-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8 1701 // CHECK1-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8 1702 // CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 1703 // CHECK1-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 1704 // CHECK1-NEXT: [[AAA_CASTED:%.*]] = alloca i64, align 8 1705 // CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 1706 // CHECK1-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 1707 // CHECK1-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8 1708 // CHECK1-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8 1709 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 1710 // CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 1711 // CHECK1-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8* 1712 // CHECK1-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8 1713 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8 1714 // CHECK1-NEXT: [[CONV3:%.*]] = bitcast i64* [[A_CASTED]] to i32* 1715 // CHECK1-NEXT: store i32 [[TMP1]], i32* [[CONV3]], align 4 1716 // CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8 1717 // CHECK1-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 8 1718 // CHECK1-NEXT: [[CONV4:%.*]] = bitcast i64* [[AA_CASTED]] to i16* 1719 // CHECK1-NEXT: store i16 [[TMP3]], i16* [[CONV4]], align 2 1720 // CHECK1-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8 1721 // CHECK1-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV2]], align 8 1722 // CHECK1-NEXT: [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] to i8* 1723 // CHECK1-NEXT: store i8 [[TMP5]], i8* [[CONV5]], align 1 1724 // CHECK1-NEXT: [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8 1725 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]]) 1726 // CHECK1-NEXT: ret void 1727 // 1728 // 1729 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..11 1730 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { 1731 // CHECK1-NEXT: entry: 1732 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 1733 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 1734 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 1735 // CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 1736 // CHECK1-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8 1737 // CHECK1-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8 1738 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 1739 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 1740 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 1741 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 1742 // CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 1743 // CHECK1-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 1744 // CHECK1-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8 1745 // CHECK1-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8 1746 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 1747 // CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 1748 // CHECK1-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8* 1749 // CHECK1-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8 1750 // CHECK1-NEXT: ret void 1751 // 1752 // 1753 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178 1754 // CHECK1-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] { 1755 // CHECK1-NEXT: entry: 1756 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 1757 // CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 1758 // CHECK1-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8 1759 // CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 1760 // CHECK1-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 1761 // CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 1762 // CHECK1-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 1763 // CHECK1-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8 1764 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 1765 // CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 1766 // CHECK1-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8 1767 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8 1768 // CHECK1-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32* 1769 // CHECK1-NEXT: store i32 [[TMP1]], i32* [[CONV2]], align 4 1770 // CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8 1771 // CHECK1-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 8 1772 // CHECK1-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* 1773 // CHECK1-NEXT: store i16 [[TMP3]], i16* [[CONV3]], align 2 1774 // CHECK1-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8 1775 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) 1776 // CHECK1-NEXT: ret void 1777 // 1778 // 1779 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..14 1780 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { 1781 // CHECK1-NEXT: entry: 1782 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 1783 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 1784 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 1785 // CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 1786 // CHECK1-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8 1787 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 1788 // CHECK1-NEXT: [[TMP:%.*]] = alloca i64, align 8 1789 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 1790 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 1791 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 1792 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 1793 // CHECK1-NEXT: [[I:%.*]] = alloca i64, align 8 1794 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 1795 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 1796 // CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 1797 // CHECK1-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 1798 // CHECK1-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8 1799 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 1800 // CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 1801 // CHECK1-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8 1802 // CHECK1-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 1803 // CHECK1-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8 1804 // CHECK1-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 1805 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 1806 // CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 1807 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 1808 // CHECK1-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) 1809 // CHECK1-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 1810 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6 1811 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 1812 // CHECK1: cond.true: 1813 // CHECK1-NEXT: br label [[COND_END:%.*]] 1814 // CHECK1: cond.false: 1815 // CHECK1-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 1816 // CHECK1-NEXT: br label [[COND_END]] 1817 // CHECK1: cond.end: 1818 // CHECK1-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 1819 // CHECK1-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 1820 // CHECK1-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 1821 // CHECK1-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8 1822 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 1823 // CHECK1: omp.inner.for.cond: 1824 // CHECK1-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !41 1825 // CHECK1-NEXT: [[TMP7:%.*]] = load i64, i64* 
[[DOTOMP_UB]], align 8, !llvm.access.group !41 1826 // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]] 1827 // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 1828 // CHECK1: omp.inner.for.body: 1829 // CHECK1-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !41 1830 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3 1831 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]] 1832 // CHECK1-NEXT: store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !41 1833 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !41 1834 // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1 1835 // CHECK1-NEXT: store i32 [[ADD3]], i32* [[CONV]], align 8, !llvm.access.group !41 1836 // CHECK1-NEXT: [[TMP10:%.*]] = load i16, i16* [[CONV1]], align 8, !llvm.access.group !41 1837 // CHECK1-NEXT: [[CONV4:%.*]] = sext i16 [[TMP10]] to i32 1838 // CHECK1-NEXT: [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1 1839 // CHECK1-NEXT: [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16 1840 // CHECK1-NEXT: store i16 [[CONV6]], i16* [[CONV1]], align 8, !llvm.access.group !41 1841 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2 1842 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !41 1843 // CHECK1-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP11]], 1 1844 // CHECK1-NEXT: store i32 [[ADD7]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !41 1845 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 1846 // CHECK1: omp.body.continue: 1847 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 1848 // CHECK1: omp.inner.for.inc: 1849 // CHECK1-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !41 1850 // CHECK1-NEXT: [[ADD8:%.*]] = add nsw i64 [[TMP12]], 1 1851 // CHECK1-NEXT: store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !41 1852 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP42:![0-9]+]] 1853 // CHECK1: omp.inner.for.end: 1854 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 1855 // CHECK1: omp.loop.exit: 1856 // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 1857 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 1858 // CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 1859 // CHECK1-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 1860 // CHECK1: .omp.final.then: 1861 // CHECK1-NEXT: store i64 11, i64* [[I]], align 8 1862 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] 1863 // CHECK1: .omp.final.done: 1864 // CHECK1-NEXT: ret void 1865 // 1866 // 1867 // CHECK1-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg 1868 // CHECK1-SAME: () #[[ATTR7:[0-9]+]] { 1869 // CHECK1-NEXT: entry: 1870 // CHECK1-NEXT: call void @__tgt_register_requires(i64 1) 1871 // CHECK1-NEXT: ret void 1872 // 1873 // 1874 // CHECK2-LABEL: define {{[^@]+}}@_Z7get_valv 1875 // CHECK2-SAME: () #[[ATTR0:[0-9]+]] { 1876 // CHECK2-NEXT: entry: 1877 // CHECK2-NEXT: ret i64 0 1878 // 1879 // 1880 // CHECK2-LABEL: define {{[^@]+}}@_Z3fooi 1881 // CHECK2-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] { 1882 // CHECK2-NEXT: entry: 1883 // CHECK2-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 1884 // CHECK2-NEXT: [[A:%.*]] = alloca i32, align 4 1885 // CHECK2-NEXT: [[AA:%.*]] = alloca i16, align 2 1886 // CHECK2-NEXT: [[B:%.*]] = alloca [10 x float], align 4 1887 // CHECK2-NEXT: [[SAVED_STACK:%.*]] = 
alloca i8*, align 8 1888 // CHECK2-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8 1889 // CHECK2-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8 1890 // CHECK2-NEXT: [[__VLA_EXPR1:%.*]] = alloca i64, align 8 1891 // CHECK2-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8 1892 // CHECK2-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1 1893 // CHECK2-NEXT: [[K:%.*]] = alloca i64, align 8 1894 // CHECK2-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 1895 // CHECK2-NEXT: [[K_CASTED:%.*]] = alloca i64, align 8 1896 // CHECK2-NEXT: [[LIN:%.*]] = alloca i32, align 4 1897 // CHECK2-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 1898 // CHECK2-NEXT: [[LIN_CASTED:%.*]] = alloca i64, align 8 1899 // CHECK2-NEXT: [[A_CASTED4:%.*]] = alloca i64, align 8 1900 // CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 1901 // CHECK2-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 1902 // CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 1903 // CHECK2-NEXT: [[A_CASTED6:%.*]] = alloca i64, align 8 1904 // CHECK2-NEXT: [[AA_CASTED8:%.*]] = alloca i64, align 8 1905 // CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS10:%.*]] = alloca [2 x i8*], align 8 1906 // CHECK2-NEXT: [[DOTOFFLOAD_PTRS11:%.*]] = alloca [2 x i8*], align 8 1907 // CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS12:%.*]] = alloca [2 x i8*], align 8 1908 // CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 1909 // CHECK2-NEXT: [[A_CASTED15:%.*]] = alloca i64, align 8 1910 // CHECK2-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 1911 // CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [10 x i8*], align 8 1912 // CHECK2-NEXT: [[DOTOFFLOAD_PTRS21:%.*]] = alloca [10 x i8*], align 8 1913 // CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [10 x i8*], align 8 1914 // CHECK2-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 8 1915 // CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) 1916 // CHECK2-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 1917 // CHECK2-NEXT: store i32 0, i32* [[A]], align 4 1918 // CHECK2-NEXT: store i16 0, i16* [[AA]], align 2 1919 // CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 1920 // CHECK2-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 1921 // CHECK2-NEXT: [[TMP3:%.*]] = call i8* @llvm.stacksave() 1922 // CHECK2-NEXT: store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8 1923 // CHECK2-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP2]], align 4 1924 // CHECK2-NEXT: store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8 1925 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 1926 // CHECK2-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64 1927 // CHECK2-NEXT: [[TMP6:%.*]] = mul nuw i64 5, [[TMP5]] 1928 // CHECK2-NEXT: [[VLA1:%.*]] = alloca double, i64 [[TMP6]], align 8 1929 // CHECK2-NEXT: store i64 [[TMP5]], i64* [[__VLA_EXPR1]], align 8 1930 // CHECK2-NEXT: [[TMP7:%.*]] = call i8* @__kmpc_omp_target_task_alloc(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i64 40, i64 1, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. 
to i32 (i32, i8*)*), i64 -1) 1931 // CHECK2-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.kmp_task_t_with_privates* 1932 // CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP8]], i32 0, i32 0 1933 // CHECK2-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i8* [[TMP7]]) 1934 // CHECK2-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv() 1935 // CHECK2-NEXT: store i64 [[CALL]], i64* [[K]], align 8 1936 // CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[A]], align 4 1937 // CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32* 1938 // CHECK2-NEXT: store i32 [[TMP11]], i32* [[CONV]], align 4 1939 // CHECK2-NEXT: [[TMP12:%.*]] = load i64, i64* [[A_CASTED]], align 8 1940 // CHECK2-NEXT: [[TMP13:%.*]] = load i64, i64* [[K]], align 8 1941 // CHECK2-NEXT: store i64 [[TMP13]], i64* [[K_CASTED]], align 8 1942 // CHECK2-NEXT: [[TMP14:%.*]] = load i64, i64* [[K_CASTED]], align 8 1943 // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101(i64 [[TMP12]], i64 [[TMP14]]) #[[ATTR4:[0-9]+]] 1944 // CHECK2-NEXT: store i32 12, i32* [[LIN]], align 4 1945 // CHECK2-NEXT: [[TMP15:%.*]] = load i16, i16* [[AA]], align 2 1946 // CHECK2-NEXT: [[CONV2:%.*]] = bitcast i64* [[AA_CASTED]] to i16* 1947 // CHECK2-NEXT: store i16 [[TMP15]], i16* [[CONV2]], align 2 1948 // CHECK2-NEXT: [[TMP16:%.*]] = load i64, i64* [[AA_CASTED]], align 8 1949 // CHECK2-NEXT: [[TMP17:%.*]] = load i32, i32* [[LIN]], align 4 1950 // CHECK2-NEXT: [[CONV3:%.*]] = bitcast i64* [[LIN_CASTED]] to i32* 1951 // CHECK2-NEXT: store i32 [[TMP17]], i32* [[CONV3]], align 4 1952 // CHECK2-NEXT: [[TMP18:%.*]] = load i64, i64* [[LIN_CASTED]], align 8 1953 // CHECK2-NEXT: [[TMP19:%.*]] = load i32, i32* [[A]], align 4 1954 // CHECK2-NEXT: [[CONV5:%.*]] = bitcast i64* [[A_CASTED4]] to i32* 1955 // CHECK2-NEXT: store i32 [[TMP19]], i32* [[CONV5]], align 4 1956 // CHECK2-NEXT: [[TMP20:%.*]] = load i64, i64* [[A_CASTED4]], align 8 1957 // CHECK2-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 1958 // CHECK2-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i64* 1959 // CHECK2-NEXT: store i64 [[TMP16]], i64* [[TMP22]], align 8 1960 // CHECK2-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 1961 // CHECK2-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i64* 1962 // CHECK2-NEXT: store i64 [[TMP16]], i64* [[TMP24]], align 8 1963 // CHECK2-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 1964 // CHECK2-NEXT: store i8* null, i8** [[TMP25]], align 8 1965 // CHECK2-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 1966 // CHECK2-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64* 1967 // CHECK2-NEXT: store i64 [[TMP18]], i64* [[TMP27]], align 8 1968 // CHECK2-NEXT: [[TMP28:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 1969 // CHECK2-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64* 1970 // CHECK2-NEXT: store i64 [[TMP18]], i64* [[TMP29]], align 8 1971 // CHECK2-NEXT: [[TMP30:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 1972 // CHECK2-NEXT: store i8* null, i8** [[TMP30]], align 8 1973 // CHECK2-NEXT: [[TMP31:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 1974 // CHECK2-NEXT: 
[[TMP32:%.*]] = bitcast i8** [[TMP31]] to i64* 1975 // CHECK2-NEXT: store i64 [[TMP20]], i64* [[TMP32]], align 8 1976 // CHECK2-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 1977 // CHECK2-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i64* 1978 // CHECK2-NEXT: store i64 [[TMP20]], i64* [[TMP34]], align 8 1979 // CHECK2-NEXT: [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 1980 // CHECK2-NEXT: store i8* null, i8** [[TMP35]], align 8 1981 // CHECK2-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 1982 // CHECK2-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 1983 // CHECK2-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108.region_id, i32 3, i8** [[TMP36]], i8** [[TMP37]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) 1984 // CHECK2-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0 1985 // CHECK2-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 1986 // CHECK2: omp_offload.failed: 1987 // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108(i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]]) #[[ATTR4]] 1988 // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT]] 1989 // CHECK2: omp_offload.cont: 1990 // CHECK2-NEXT: [[TMP40:%.*]] = load i32, i32* [[A]], align 4 1991 // CHECK2-NEXT: [[CONV7:%.*]] = bitcast i64* [[A_CASTED6]] to i32* 1992 // CHECK2-NEXT: store i32 [[TMP40]], i32* [[CONV7]], align 4 1993 // CHECK2-NEXT: [[TMP41:%.*]] = load i64, i64* [[A_CASTED6]], align 8 1994 // CHECK2-NEXT: [[TMP42:%.*]] = load i16, i16* [[AA]], align 2 1995 // CHECK2-NEXT: [[CONV9:%.*]] = bitcast i64* [[AA_CASTED8]] to i16* 1996 // CHECK2-NEXT: store i16 [[TMP42]], i16* [[CONV9]], align 2 1997 // CHECK2-NEXT: [[TMP43:%.*]] = load i64, i64* [[AA_CASTED8]], align 8 1998 // CHECK2-NEXT: [[TMP44:%.*]] = load i32, i32* [[N_ADDR]], align 4 1999 // CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP44]], 10 2000 // CHECK2-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 2001 // CHECK2: omp_if.then: 2002 // CHECK2-NEXT: [[TMP45:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0 2003 // CHECK2-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i64* 2004 // CHECK2-NEXT: store i64 [[TMP41]], i64* [[TMP46]], align 8 2005 // CHECK2-NEXT: [[TMP47:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0 2006 // CHECK2-NEXT: [[TMP48:%.*]] = bitcast i8** [[TMP47]] to i64* 2007 // CHECK2-NEXT: store i64 [[TMP41]], i64* [[TMP48]], align 8 2008 // CHECK2-NEXT: [[TMP49:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS12]], i64 0, i64 0 2009 // CHECK2-NEXT: store i8* null, i8** [[TMP49]], align 8 2010 // CHECK2-NEXT: [[TMP50:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 1 2011 // CHECK2-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i64* 2012 // CHECK2-NEXT: store i64 [[TMP43]], i64* [[TMP51]], align 8 2013 // CHECK2-NEXT: [[TMP52:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 1 2014 // CHECK2-NEXT: [[TMP53:%.*]] = 
bitcast i8** [[TMP52]] to i64* 2015 // CHECK2-NEXT: store i64 [[TMP43]], i64* [[TMP53]], align 8 2016 // CHECK2-NEXT: [[TMP54:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS12]], i64 0, i64 1 2017 // CHECK2-NEXT: store i8* null, i8** [[TMP54]], align 8 2018 // CHECK2-NEXT: [[TMP55:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0 2019 // CHECK2-NEXT: [[TMP56:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0 2020 // CHECK2-NEXT: [[TMP57:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116.region_id, i32 2, i8** [[TMP55]], i8** [[TMP56]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) 2021 // CHECK2-NEXT: [[TMP58:%.*]] = icmp ne i32 [[TMP57]], 0 2022 // CHECK2-NEXT: br i1 [[TMP58]], label [[OMP_OFFLOAD_FAILED13:%.*]], label [[OMP_OFFLOAD_CONT14:%.*]] 2023 // CHECK2: omp_offload.failed13: 2024 // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116(i64 [[TMP41]], i64 [[TMP43]]) #[[ATTR4]] 2025 // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT14]] 2026 // CHECK2: omp_offload.cont14: 2027 // CHECK2-NEXT: br label [[OMP_IF_END:%.*]] 2028 // CHECK2: omp_if.else: 2029 // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116(i64 [[TMP41]], i64 [[TMP43]]) #[[ATTR4]] 2030 // CHECK2-NEXT: br label [[OMP_IF_END]] 2031 // CHECK2: omp_if.end: 2032 // CHECK2-NEXT: [[TMP59:%.*]] = load i32, i32* [[A]], align 4 2033 // CHECK2-NEXT: store i32 [[TMP59]], i32* [[DOTCAPTURE_EXPR_]], align 4 2034 // CHECK2-NEXT: [[TMP60:%.*]] = load i32, i32* [[A]], align 4 2035 // CHECK2-NEXT: [[CONV16:%.*]] = bitcast i64* [[A_CASTED15]] to i32* 2036 // CHECK2-NEXT: store i32 [[TMP60]], i32* [[CONV16]], align 4 2037 // CHECK2-NEXT: [[TMP61:%.*]] = load i64, i64* [[A_CASTED15]], align 8 2038 // CHECK2-NEXT: [[TMP62:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 2039 // CHECK2-NEXT: [[CONV17:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* 2040 // CHECK2-NEXT: store i32 [[TMP62]], i32* [[CONV17]], align 4 2041 // CHECK2-NEXT: [[TMP63:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 2042 // CHECK2-NEXT: [[TMP64:%.*]] = load i32, i32* [[N_ADDR]], align 4 2043 // CHECK2-NEXT: [[CMP18:%.*]] = icmp sgt i32 [[TMP64]], 20 2044 // CHECK2-NEXT: br i1 [[CMP18]], label [[OMP_IF_THEN19:%.*]], label [[OMP_IF_ELSE25:%.*]] 2045 // CHECK2: omp_if.then19: 2046 // CHECK2-NEXT: [[TMP65:%.*]] = mul nuw i64 [[TMP2]], 4 2047 // CHECK2-NEXT: [[TMP66:%.*]] = mul nuw i64 5, [[TMP5]] 2048 // CHECK2-NEXT: [[TMP67:%.*]] = mul nuw i64 [[TMP66]], 8 2049 // CHECK2-NEXT: [[TMP68:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 2050 // CHECK2-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i64* 2051 // CHECK2-NEXT: store i64 [[TMP61]], i64* [[TMP69]], align 8 2052 // CHECK2-NEXT: [[TMP70:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 2053 // CHECK2-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i64* 2054 // CHECK2-NEXT: store i64 [[TMP61]], i64* [[TMP71]], align 8 2055 // CHECK2-NEXT: [[TMP72:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 2056 // CHECK2-NEXT: store i64 4, i64* [[TMP72]], align 8 2057 // 
CHECK2-NEXT: [[TMP73:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0 2058 // CHECK2-NEXT: store i8* null, i8** [[TMP73]], align 8 2059 // CHECK2-NEXT: [[TMP74:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1 2060 // CHECK2-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to [10 x float]** 2061 // CHECK2-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP75]], align 8 2062 // CHECK2-NEXT: [[TMP76:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1 2063 // CHECK2-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to [10 x float]** 2064 // CHECK2-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP77]], align 8 2065 // CHECK2-NEXT: [[TMP78:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 2066 // CHECK2-NEXT: store i64 40, i64* [[TMP78]], align 8 2067 // CHECK2-NEXT: [[TMP79:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1 2068 // CHECK2-NEXT: store i8* null, i8** [[TMP79]], align 8 2069 // CHECK2-NEXT: [[TMP80:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2 2070 // CHECK2-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i64* 2071 // CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP81]], align 8 2072 // CHECK2-NEXT: [[TMP82:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2 2073 // CHECK2-NEXT: [[TMP83:%.*]] = bitcast i8** [[TMP82]] to i64* 2074 // CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP83]], align 8 2075 // CHECK2-NEXT: [[TMP84:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 2076 // CHECK2-NEXT: store i64 8, i64* [[TMP84]], align 8 2077 // CHECK2-NEXT: [[TMP85:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2 2078 // CHECK2-NEXT: store i8* null, i8** [[TMP85]], align 8 2079 // CHECK2-NEXT: [[TMP86:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3 2080 // CHECK2-NEXT: [[TMP87:%.*]] = bitcast i8** [[TMP86]] to float** 2081 // CHECK2-NEXT: store float* [[VLA]], float** [[TMP87]], align 8 2082 // CHECK2-NEXT: [[TMP88:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3 2083 // CHECK2-NEXT: [[TMP89:%.*]] = bitcast i8** [[TMP88]] to float** 2084 // CHECK2-NEXT: store float* [[VLA]], float** [[TMP89]], align 8 2085 // CHECK2-NEXT: [[TMP90:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 2086 // CHECK2-NEXT: store i64 [[TMP65]], i64* [[TMP90]], align 8 2087 // CHECK2-NEXT: [[TMP91:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 3 2088 // CHECK2-NEXT: store i8* null, i8** [[TMP91]], align 8 2089 // CHECK2-NEXT: [[TMP92:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 4 2090 // CHECK2-NEXT: [[TMP93:%.*]] = bitcast i8** [[TMP92]] to [5 x [10 x double]]** 2091 // CHECK2-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP93]], align 8 2092 // CHECK2-NEXT: [[TMP94:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 4 2093 // CHECK2-NEXT: [[TMP95:%.*]] = bitcast i8** [[TMP94]] to [5 x [10 x double]]** 2094 // CHECK2-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP95]], align 8 2095 // CHECK2-NEXT: [[TMP96:%.*]] = getelementptr inbounds [10 x i64], [10 x 
i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 2096 // CHECK2-NEXT: store i64 400, i64* [[TMP96]], align 8 2097 // CHECK2-NEXT: [[TMP97:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 4 2098 // CHECK2-NEXT: store i8* null, i8** [[TMP97]], align 8 2099 // CHECK2-NEXT: [[TMP98:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 5 2100 // CHECK2-NEXT: [[TMP99:%.*]] = bitcast i8** [[TMP98]] to i64* 2101 // CHECK2-NEXT: store i64 5, i64* [[TMP99]], align 8 2102 // CHECK2-NEXT: [[TMP100:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 5 2103 // CHECK2-NEXT: [[TMP101:%.*]] = bitcast i8** [[TMP100]] to i64* 2104 // CHECK2-NEXT: store i64 5, i64* [[TMP101]], align 8 2105 // CHECK2-NEXT: [[TMP102:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 2106 // CHECK2-NEXT: store i64 8, i64* [[TMP102]], align 8 2107 // CHECK2-NEXT: [[TMP103:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 5 2108 // CHECK2-NEXT: store i8* null, i8** [[TMP103]], align 8 2109 // CHECK2-NEXT: [[TMP104:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 6 2110 // CHECK2-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to i64* 2111 // CHECK2-NEXT: store i64 [[TMP5]], i64* [[TMP105]], align 8 2112 // CHECK2-NEXT: [[TMP106:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 6 2113 // CHECK2-NEXT: [[TMP107:%.*]] = bitcast i8** [[TMP106]] to i64* 2114 // CHECK2-NEXT: store i64 [[TMP5]], i64* [[TMP107]], align 8 2115 // CHECK2-NEXT: [[TMP108:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 2116 // CHECK2-NEXT: store i64 8, i64* [[TMP108]], align 8 2117 // CHECK2-NEXT: [[TMP109:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 6 2118 // CHECK2-NEXT: store i8* null, i8** [[TMP109]], align 8 2119 // CHECK2-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 7 2120 // CHECK2-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to double** 2121 // CHECK2-NEXT: store double* [[VLA1]], double** [[TMP111]], align 8 2122 // CHECK2-NEXT: [[TMP112:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 7 2123 // CHECK2-NEXT: [[TMP113:%.*]] = bitcast i8** [[TMP112]] to double** 2124 // CHECK2-NEXT: store double* [[VLA1]], double** [[TMP113]], align 8 2125 // CHECK2-NEXT: [[TMP114:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 2126 // CHECK2-NEXT: store i64 [[TMP67]], i64* [[TMP114]], align 8 2127 // CHECK2-NEXT: [[TMP115:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 7 2128 // CHECK2-NEXT: store i8* null, i8** [[TMP115]], align 8 2129 // CHECK2-NEXT: [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 8 2130 // CHECK2-NEXT: [[TMP117:%.*]] = bitcast i8** [[TMP116]] to %struct.TT** 2131 // CHECK2-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP117]], align 8 2132 // CHECK2-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 8 2133 // CHECK2-NEXT: [[TMP119:%.*]] = bitcast i8** [[TMP118]] to %struct.TT** 2134 // CHECK2-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP119]], align 8 2135 // CHECK2-NEXT: [[TMP120:%.*]] = 
getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 2136 // CHECK2-NEXT: store i64 16, i64* [[TMP120]], align 8 2137 // CHECK2-NEXT: [[TMP121:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 8 2138 // CHECK2-NEXT: store i8* null, i8** [[TMP121]], align 8 2139 // CHECK2-NEXT: [[TMP122:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 9 2140 // CHECK2-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to i64* 2141 // CHECK2-NEXT: store i64 [[TMP63]], i64* [[TMP123]], align 8 2142 // CHECK2-NEXT: [[TMP124:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 9 2143 // CHECK2-NEXT: [[TMP125:%.*]] = bitcast i8** [[TMP124]] to i64* 2144 // CHECK2-NEXT: store i64 [[TMP63]], i64* [[TMP125]], align 8 2145 // CHECK2-NEXT: [[TMP126:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 9 2146 // CHECK2-NEXT: store i64 4, i64* [[TMP126]], align 8 2147 // CHECK2-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 9 2148 // CHECK2-NEXT: store i8* null, i8** [[TMP127]], align 8 2149 // CHECK2-NEXT: [[TMP128:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 2150 // CHECK2-NEXT: [[TMP129:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 2151 // CHECK2-NEXT: [[TMP130:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 2152 // CHECK2-NEXT: [[TMP131:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140.region_id, i32 10, i8** [[TMP128]], i8** [[TMP129]], i64* [[TMP130]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) 2153 // CHECK2-NEXT: [[TMP132:%.*]] = icmp ne i32 [[TMP131]], 0 2154 // CHECK2-NEXT: br i1 [[TMP132]], label [[OMP_OFFLOAD_FAILED23:%.*]], label [[OMP_OFFLOAD_CONT24:%.*]] 2155 // CHECK2: omp_offload.failed23: 2156 // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i64 [[TMP61]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP63]]) #[[ATTR4]] 2157 // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT24]] 2158 // CHECK2: omp_offload.cont24: 2159 // CHECK2-NEXT: br label [[OMP_IF_END26:%.*]] 2160 // CHECK2: omp_if.else25: 2161 // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i64 [[TMP61]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP63]]) #[[ATTR4]] 2162 // CHECK2-NEXT: br label [[OMP_IF_END26]] 2163 // CHECK2: omp_if.end26: 2164 // CHECK2-NEXT: [[TMP133:%.*]] = load i32, i32* [[A]], align 4 2165 // CHECK2-NEXT: [[TMP134:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 2166 // CHECK2-NEXT: call void @llvm.stackrestore(i8* [[TMP134]]) 2167 // CHECK2-NEXT: ret i32 [[TMP133]] 2168 // 2169 // 2170 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96 2171 // CHECK2-SAME: () #[[ATTR2:[0-9]+]] { 2172 // CHECK2-NEXT: entry: 2173 // CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*)) 2174 // CHECK2-NEXT: ret void 2175 // 2176 // 2177 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined. 2178 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR3:[0-9]+]] { 2179 // CHECK2-NEXT: entry: 2180 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 2181 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 2182 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 2183 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4 2184 // CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 2185 // CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 2186 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 2187 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 2188 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4 2189 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 2190 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 2191 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 2192 // CHECK2-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4 2193 // CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 2194 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 2195 // CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 2196 // CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 2197 // CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 2198 // CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 2199 // CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5 2200 // CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 2201 // CHECK2: cond.true: 2202 // CHECK2-NEXT: br label [[COND_END:%.*]] 2203 // CHECK2: cond.false: 2204 // CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 2205 // CHECK2-NEXT: br label [[COND_END]] 2206 // CHECK2: cond.end: 2207 // CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 2208 // CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 2209 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 2210 // CHECK2-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 2211 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 2212 // CHECK2: omp.inner.for.cond: 2213 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10 2214 // CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !10 2215 // CHECK2-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 2216 // CHECK2-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 2217 // CHECK2: omp.inner.for.body: 2218 // CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10 2219 // CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5 2220 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]] 2221 // CHECK2-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !10 2222 // CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 2223 // CHECK2: omp.body.continue: 2224 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 2225 // CHECK2: omp.inner.for.inc: 2226 // CHECK2-NEXT: [[TMP8:%.*]] = 
load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10 2227 // CHECK2-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 2228 // CHECK2-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10 2229 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] 2230 // CHECK2: omp.inner.for.end: 2231 // CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 2232 // CHECK2: omp.loop.exit: 2233 // CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 2234 // CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 2235 // CHECK2-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0 2236 // CHECK2-NEXT: br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 2237 // CHECK2: .omp.final.then: 2238 // CHECK2-NEXT: store i32 33, i32* [[I]], align 4 2239 // CHECK2-NEXT: br label [[DOTOMP_FINAL_DONE]] 2240 // CHECK2: .omp.final.done: 2241 // CHECK2-NEXT: ret void 2242 // 2243 // 2244 // CHECK2-LABEL: define {{[^@]+}}@.omp_task_entry. 2245 // CHECK2-SAME: (i32 signext [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias [[TMP1:%.*]]) #[[ATTR5:[0-9]+]] { 2246 // CHECK2-NEXT: entry: 2247 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4 2248 // CHECK2-NEXT: [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 8 2249 // CHECK2-NEXT: [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 8 2250 // CHECK2-NEXT: [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 8 2251 // CHECK2-NEXT: [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 8 2252 // CHECK2-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 8 2253 // CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4 2254 // CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 8 2255 // CHECK2-NEXT: store i32 [[TMP0]], i32* [[DOTADDR]], align 4 2256 // CHECK2-NEXT: store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8 2257 // CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4 2258 // CHECK2-NEXT: [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8 2259 // CHECK2-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0 2260 // CHECK2-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2 2261 // CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0 2262 // CHECK2-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8 2263 // CHECK2-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon* 2264 // CHECK2-NEXT: [[TMP9:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8* 2265 // CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META16:![0-9]+]]) 2266 // CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META19:![0-9]+]]) 2267 // CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]]) 2268 // CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META23:![0-9]+]]) 2269 // CHECK2-NEXT: store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !25 2270 // CHECK2-NEXT: store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 8, !noalias !25 2271 // CHECK2-NEXT: store i8* null, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !25 2272 // CHECK2-NEXT: store void (i8*, ...)* null, void 
(i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !25 2273 // CHECK2-NEXT: store i8* [[TMP9]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !25 2274 // CHECK2-NEXT: store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !25 2275 // CHECK2-NEXT: [[TMP10:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !25 2276 // CHECK2-NEXT: [[TMP11:%.*]] = call i32 @__tgt_target_teams_nowait_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 1, i32 0, i32 0, i8* null, i32 0, i8* null) #[[ATTR4]] 2277 // CHECK2-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 2278 // CHECK2-NEXT: br i1 [[TMP12]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]] 2279 // CHECK2: omp_offload.failed.i: 2280 // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96() #[[ATTR4]] 2281 // CHECK2-NEXT: br label [[DOTOMP_OUTLINED__1_EXIT]] 2282 // CHECK2: .omp_outlined..1.exit: 2283 // CHECK2-NEXT: ret i32 0 2284 // 2285 // 2286 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101 2287 // CHECK2-SAME: (i64 [[A:%.*]], i64 [[K:%.*]]) #[[ATTR3]] { 2288 // CHECK2-NEXT: entry: 2289 // CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 2290 // CHECK2-NEXT: [[K_ADDR:%.*]] = alloca i64, align 8 2291 // CHECK2-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 2292 // CHECK2-NEXT: [[K_CASTED:%.*]] = alloca i64, align 8 2293 // CHECK2-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 2294 // CHECK2-NEXT: store i64 [[K]], i64* [[K_ADDR]], align 8 2295 // CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 2296 // CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 8 2297 // CHECK2-NEXT: [[CONV1:%.*]] = bitcast i64* [[A_CASTED]] to i32* 2298 // CHECK2-NEXT: store i32 [[TMP0]], i32* [[CONV1]], align 4 2299 // CHECK2-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8 2300 // CHECK2-NEXT: [[TMP2:%.*]] = load i64, i64* [[K_ADDR]], align 8 2301 // CHECK2-NEXT: store i64 [[TMP2]], i64* [[K_CASTED]], align 8 2302 // CHECK2-NEXT: [[TMP3:%.*]] = load i64, i64* [[K_CASTED]], align 8 2303 // CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]]) 2304 // CHECK2-NEXT: ret void 2305 // 2306 // 2307 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..2 2308 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[K:%.*]]) #[[ATTR3]] { 2309 // CHECK2-NEXT: entry: 2310 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 2311 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 2312 // CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 2313 // CHECK2-NEXT: [[K_ADDR:%.*]] = alloca i64, align 8 2314 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 2315 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4 2316 // CHECK2-NEXT: [[DOTLINEAR_START:%.*]] = alloca i64, align 8 2317 // CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 2318 // CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 2319 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 2320 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 2321 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4 2322 // CHECK2-NEXT: [[K1:%.*]] = alloca i64, align 8 2323 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 2324 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 2325 // CHECK2-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 2326 // CHECK2-NEXT: store i64 [[K]], i64* [[K_ADDR]], align 8 2327 // CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 2328 // CHECK2-NEXT: [[TMP0:%.*]] = load i64, i64* [[K_ADDR]], align 8 2329 // CHECK2-NEXT: store i64 [[TMP0]], i64* [[DOTLINEAR_START]], align 8 2330 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 2331 // CHECK2-NEXT: store i32 8, i32* [[DOTOMP_UB]], align 4 2332 // CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 2333 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 2334 // CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 2335 // CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 2336 // CHECK2-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP2]]) 2337 // CHECK2-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32 35, i32 0, i32 8, i32 1, i32 1) 2338 // CHECK2-NEXT: br label [[OMP_DISPATCH_COND:%.*]] 2339 // CHECK2: omp.dispatch.cond: 2340 // CHECK2-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]]) 2341 // CHECK2-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP3]], 0 2342 // CHECK2-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] 2343 // CHECK2: omp.dispatch.body: 2344 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 2345 // CHECK2-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 2346 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 2347 // CHECK2: omp.inner.for.cond: 2348 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26 2349 // CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !26 2350 // CHECK2-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 2351 // CHECK2-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 2352 // CHECK2: omp.inner.for.body: 
2353 // CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26 2354 // CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 2355 // CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[MUL]] 2356 // CHECK2-NEXT: store i32 [[SUB]], i32* [[I]], align 4, !llvm.access.group !26 2357 // CHECK2-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !26 2358 // CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26 2359 // CHECK2-NEXT: [[MUL2:%.*]] = mul nsw i32 [[TMP9]], 3 2360 // CHECK2-NEXT: [[CONV3:%.*]] = sext i32 [[MUL2]] to i64 2361 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP8]], [[CONV3]] 2362 // CHECK2-NEXT: store i64 [[ADD]], i64* [[K1]], align 8, !llvm.access.group !26 2363 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !26 2364 // CHECK2-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP10]], 1 2365 // CHECK2-NEXT: store i32 [[ADD4]], i32* [[CONV]], align 8, !llvm.access.group !26 2366 // CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 2367 // CHECK2: omp.body.continue: 2368 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 2369 // CHECK2: omp.inner.for.inc: 2370 // CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26 2371 // CHECK2-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP11]], 1 2372 // CHECK2-NEXT: store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26 2373 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]] 2374 // CHECK2: omp.inner.for.end: 2375 // CHECK2-NEXT: br label [[OMP_DISPATCH_INC:%.*]] 2376 // CHECK2: omp.dispatch.inc: 2377 // CHECK2-NEXT: br label [[OMP_DISPATCH_COND]] 2378 // CHECK2: omp.dispatch.end: 2379 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 2380 // CHECK2-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0 2381 // CHECK2-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 2382 // CHECK2: .omp.final.then: 2383 // CHECK2-NEXT: store i32 1, i32* [[I]], align 4 2384 // CHECK2-NEXT: br label [[DOTOMP_FINAL_DONE]] 2385 // CHECK2: .omp.final.done: 2386 // CHECK2-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 2387 // CHECK2-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 2388 // CHECK2-NEXT: br i1 [[TMP15]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]] 2389 // CHECK2: .omp.linear.pu: 2390 // CHECK2-NEXT: [[TMP16:%.*]] = load i64, i64* [[K1]], align 8 2391 // CHECK2-NEXT: store i64 [[TMP16]], i64* [[K_ADDR]], align 8 2392 // CHECK2-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] 2393 // CHECK2: .omp.linear.pu.done: 2394 // CHECK2-NEXT: ret void 2395 // 2396 // 2397 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108 2398 // CHECK2-SAME: (i64 [[AA:%.*]], i64 [[LIN:%.*]], i64 [[A:%.*]]) #[[ATTR2]] { 2399 // CHECK2-NEXT: entry: 2400 // CHECK2-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 2401 // CHECK2-NEXT: [[LIN_ADDR:%.*]] = alloca i64, align 8 2402 // CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 2403 // CHECK2-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 2404 // CHECK2-NEXT: [[LIN_CASTED:%.*]] = alloca i64, align 8 2405 // CHECK2-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 2406 // CHECK2-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 2407 // CHECK2-NEXT: store i64 [[LIN]], i64* [[LIN_ADDR]], align 8 2408 // CHECK2-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 2409 // CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 2410 // 
// CHECK2-NEXT: [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32*
// CHECK2-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK2-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 8
// CHECK2-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK2-NEXT: store i16 [[TMP0]], i16* [[CONV3]], align 2
// CHECK2-NEXT: [[TMP1:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV1]], align 8
// CHECK2-NEXT: [[CONV4:%.*]] = bitcast i64* [[LIN_CASTED]] to i32*
// CHECK2-NEXT: store i32 [[TMP2]], i32* [[CONV4]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = load i64, i64* [[LIN_CASTED]], align 8
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV2]], align 8
// CHECK2-NEXT: [[CONV5:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK2-NEXT: store i32 [[TMP4]], i32* [[CONV5]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[AA:%.*]], i64 [[LIN:%.*]], i64 [[A:%.*]]) #[[ATTR3]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[LIN_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[TMP:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTLINEAR_START:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTLINEAR_START3:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[LIN4:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[A5:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK2-NEXT: store i64 [[LIN]], i64* [[LIN_ADDR]], align 8
// CHECK2-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK2-NEXT: [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32*
// CHECK2-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV1]], align 8
// CHECK2-NEXT: store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV2]], align 8
// CHECK2-NEXT: store i32 [[TMP1]], i32* [[DOTLINEAR_START3]], align 4
// CHECK2-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv()
// CHECK2-NEXT: store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8
// CHECK2-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK2-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK2-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK2-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3]], i32 [[TMP3]])
// CHECK2-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK2-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK2-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3
// CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK2: cond.true:
// CHECK2-NEXT: br label [[COND_END:%.*]]
// CHECK2: cond.false:
// CHECK2-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK2-NEXT: br label [[COND_END]]
// CHECK2: cond.end:
// CHECK2-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK2-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK2-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK2-NEXT: store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK2: omp.inner.for.cond:
// CHECK2-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29
// CHECK2-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !29
// CHECK2-NEXT: [[CMP6:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]]
// CHECK2-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK2: omp.inner.for.body:
// CHECK2-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29
// CHECK2-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400
// CHECK2-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK2-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !29
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4, !llvm.access.group !29
// CHECK2-NEXT: [[CONV7:%.*]] = sext i32 [[TMP10]] to i64
// CHECK2-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29
// CHECK2-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !29
// CHECK2-NEXT: [[MUL8:%.*]] = mul i64 [[TMP11]], [[TMP12]]
// CHECK2-NEXT: [[ADD:%.*]] = add i64 [[CONV7]], [[MUL8]]
// CHECK2-NEXT: [[CONV9:%.*]] = trunc i64 [[ADD]] to i32
// CHECK2-NEXT: store i32 [[CONV9]], i32* [[LIN4]], align 4, !llvm.access.group !29
// CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START3]], align 4, !llvm.access.group !29
// CHECK2-NEXT: [[CONV10:%.*]] = sext i32 [[TMP13]] to i64
// CHECK2-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29
// CHECK2-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !29
// CHECK2-NEXT: [[MUL11:%.*]] = mul i64 [[TMP14]], [[TMP15]]
// CHECK2-NEXT: [[ADD12:%.*]] = add i64 [[CONV10]], [[MUL11]]
// CHECK2-NEXT: [[CONV13:%.*]] = trunc i64 [[ADD12]] to i32
// CHECK2-NEXT: store i32 [[CONV13]], i32* [[A5]], align 4, !llvm.access.group !29
// CHECK2-NEXT: [[TMP16:%.*]] = load i16, i16* [[CONV]], align 8, !llvm.access.group !29
// CHECK2-NEXT: [[CONV14:%.*]] = sext i16 [[TMP16]] to i32
// CHECK2-NEXT: [[ADD15:%.*]] = add nsw i32 [[CONV14]], 1
// CHECK2-NEXT: [[CONV16:%.*]] = trunc i32 [[ADD15]] to i16
// CHECK2-NEXT: store i16 [[CONV16]], i16* [[CONV]], align 8, !llvm.access.group !29
// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK2: omp.body.continue:
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK2: omp.inner.for.inc:
// CHECK2-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29
// CHECK2-NEXT: [[ADD17:%.*]] = add i64 [[TMP17]], 1
// CHECK2-NEXT: store i64 [[ADD17]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]]
// CHECK2: omp.inner.for.end:
// CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK2: omp.loop.exit:
// CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
// CHECK2-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK2-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
// CHECK2-NEXT: br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK2: .omp.final.then:
// CHECK2-NEXT: store i64 400, i64* [[IT]], align 8
// CHECK2-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK2: .omp.final.done:
// CHECK2-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK2-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0
// CHECK2-NEXT: br i1 [[TMP21]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
// CHECK2: .omp.linear.pu:
// CHECK2-NEXT: [[TMP22:%.*]] = load i32, i32* [[LIN4]], align 4
// CHECK2-NEXT: store i32 [[TMP22]], i32* [[CONV1]], align 8
// CHECK2-NEXT: [[TMP23:%.*]] = load i32, i32* [[A5]], align 4
// CHECK2-NEXT: store i32 [[TMP23]], i32* [[CONV2]], align 8
// CHECK2-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]]
// CHECK2: .omp.linear.pu.done:
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116
// CHECK2-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]]) #[[ATTR2]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK2-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK2-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK2-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK2-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK2-NEXT: store i32 [[TMP0]], i32* [[CONV2]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK2-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK2-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK2-NEXT: store i16 [[TMP2]], i16* [[CONV3]], align 2
// CHECK2-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]]) #[[ATTR3]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[TMP:%.*]] = alloca i16, align 2
// CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[IT:%.*]] = alloca i16, align 2
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK2-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK2-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 3, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3
// CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK2: cond.true:
// CHECK2-NEXT: br label [[COND_END:%.*]]
// CHECK2: cond.false:
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: br label [[COND_END]]
// CHECK2: cond.end:
// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK2: omp.inner.for.cond:
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !32
// CHECK2-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK2-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK2: omp.inner.for.body:
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]]
// CHECK2-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD]] to i16
// CHECK2-NEXT: store i16 [[CONV3]], i16* [[IT]], align 2, !llvm.access.group !32
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !32
// CHECK2-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK2-NEXT: store i32 [[ADD4]], i32* [[CONV]], align 8, !llvm.access.group !32
// CHECK2-NEXT: [[TMP9:%.*]] = load i16, i16* [[CONV1]], align 8, !llvm.access.group !32
// CHECK2-NEXT: [[CONV5:%.*]] = sext i16 [[TMP9]] to i32
// CHECK2-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1
// CHECK2-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16
// CHECK2-NEXT: store i16 [[CONV7]], i16* [[CONV1]], align 8, !llvm.access.group !32
// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK2: omp.body.continue:
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK2: omp.inner.for.inc:
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
// CHECK2-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK2-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]]
// CHECK2: omp.inner.for.end:
// CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK2: omp.loop.exit:
// CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK2-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
// CHECK2-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK2: .omp.final.then:
// CHECK2-NEXT: store i16 22, i16* [[IT]], align 2
// CHECK2-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK2: .omp.final.done:
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140
// CHECK2-SAME: (i64 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
// CHECK2-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8
// CHECK2-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
// CHECK2-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8
// CHECK2-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
// CHECK2-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK2-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK2-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
// CHECK2-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK2-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8
// CHECK2-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK2-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK2-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
// CHECK2-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8
// CHECK2-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
// CHECK2-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK2-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
// CHECK2-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK2-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
// CHECK2-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK2-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK2-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
// CHECK2-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
// CHECK2-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
// CHECK2-NEXT: [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK2-NEXT: [[CONV6:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK2-NEXT: store i32 [[TMP8]], i32* [[CONV6]], align 4
// CHECK2-NEXT: [[TMP9:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[CONV5]], align 8
// CHECK2-NEXT: [[CONV7:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK2-NEXT: store i32 [[TMP10]], i32* [[CONV7]], align 4
// CHECK2-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, [10 x float]*, i64, float*, [5 x [10 x double]]*, i64, i64, double*, %struct.TT*, i64)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP9]], [10 x float]* [[TMP0]], i64 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i64 [[TMP11]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..7
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
// CHECK2-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8
// CHECK2-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
// CHECK2-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8
// CHECK2-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
// CHECK2-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[TMP:%.*]] = alloca i8, align 1
// CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[IT:%.*]] = alloca i8, align 1
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK2-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
// CHECK2-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK2-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8
// CHECK2-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK2-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK2-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
// CHECK2-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8
// CHECK2-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
// CHECK2-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK2-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
// CHECK2-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK2-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
// CHECK2-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK2-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK2-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
// CHECK2-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
// CHECK2-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
// CHECK2-NEXT: [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 25, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV5]], align 8
// CHECK2-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
// CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]])
// CHECK2-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK2: omp.dispatch.cond:
// CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25
// CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK2: cond.true:
// CHECK2-NEXT: br label [[COND_END:%.*]]
// CHECK2: cond.false:
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: br label [[COND_END]]
// CHECK2: cond.end:
// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
// CHECK2-NEXT: br i1 [[CMP6]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK2: omp.dispatch.body:
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK2: omp.inner.for.cond:
// CHECK2-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
// CHECK2-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !35
// CHECK2-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
// CHECK2-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK2: omp.inner.for.body:
// CHECK2-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
// CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 122, [[MUL]]
// CHECK2-NEXT: [[CONV8:%.*]] = trunc i32 [[SUB]] to i8
// CHECK2-NEXT: store i8 [[CONV8]], i8* [[IT]], align 1, !llvm.access.group !35
// CHECK2-NEXT: [[TMP19:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !35
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK2-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8, !llvm.access.group !35
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 2
// CHECK2-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !35
// CHECK2-NEXT: [[CONV9:%.*]] = fpext float [[TMP20]] to double
// CHECK2-NEXT: [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00
// CHECK2-NEXT: [[CONV11:%.*]] = fptrunc double [[ADD10]] to float
// CHECK2-NEXT: store float [[CONV11]], float* [[ARRAYIDX]], align 4, !llvm.access.group !35
// CHECK2-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds float, float* [[TMP2]], i64 3
// CHECK2-NEXT: [[TMP21:%.*]] = load float, float* [[ARRAYIDX12]], align 4, !llvm.access.group !35
// CHECK2-NEXT: [[CONV13:%.*]] = fpext float [[TMP21]] to double
// CHECK2-NEXT: [[ADD14:%.*]] = fadd double [[CONV13]], 1.000000e+00
// CHECK2-NEXT: [[CONV15:%.*]] = fptrunc double [[ADD14]] to float
// CHECK2-NEXT: store float [[CONV15]], float* [[ARRAYIDX12]], align 4, !llvm.access.group !35
// CHECK2-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i64 0, i64 1
// CHECK2-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX16]], i64 0, i64 2
// CHECK2-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX17]], align 8, !llvm.access.group !35
// CHECK2-NEXT: [[ADD18:%.*]] = fadd double [[TMP22]], 1.000000e+00
// CHECK2-NEXT: store double [[ADD18]], double* [[ARRAYIDX17]], align 8, !llvm.access.group !35
// CHECK2-NEXT: [[TMP23:%.*]] = mul nsw i64 1, [[TMP5]]
// CHECK2-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds double, double* [[TMP6]], i64 [[TMP23]]
// CHECK2-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX19]], i64 3
// CHECK2-NEXT: [[TMP24:%.*]] = load double, double* [[ARRAYIDX20]], align 8, !llvm.access.group !35
// CHECK2-NEXT: [[ADD21:%.*]] = fadd double [[TMP24]], 1.000000e+00
// CHECK2-NEXT: store double [[ADD21]], double* [[ARRAYIDX20]], align 8, !llvm.access.group !35
// CHECK2-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
// CHECK2-NEXT: [[TMP25:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group !35
// CHECK2-NEXT: [[ADD22:%.*]] = add nsw i64 [[TMP25]], 1
// CHECK2-NEXT: store i64 [[ADD22]], i64* [[X]], align 8, !llvm.access.group !35
// CHECK2-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
// CHECK2-NEXT: [[TMP26:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group !35
// CHECK2-NEXT: [[CONV23:%.*]] = sext i8 [[TMP26]] to i32
// CHECK2-NEXT: [[ADD24:%.*]] = add nsw i32 [[CONV23]], 1
// CHECK2-NEXT: [[CONV25:%.*]] = trunc i32 [[ADD24]] to i8
// CHECK2-NEXT: store i8 [[CONV25]], i8* [[Y]], align 8, !llvm.access.group !35
// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK2: omp.body.continue:
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK2: omp.inner.for.inc:
// CHECK2-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
// CHECK2-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP27]], 1
// CHECK2-NEXT: store i32 [[ADD26]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]]
// CHECK2: omp.inner.for.end:
// CHECK2-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK2: omp.dispatch.inc:
// CHECK2-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: [[ADD27:%.*]] = add nsw i32 [[TMP28]], [[TMP29]]
// CHECK2-NEXT: store i32 [[ADD27]], i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP30]], [[TMP31]]
// CHECK2-NEXT: store i32 [[ADD28]], i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK2: omp.dispatch.end:
// CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]])
// CHECK2-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK2-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0
// CHECK2-NEXT: br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK2: .omp.final.then:
// CHECK2-NEXT: store i8 96, i8* [[IT]], align 1
// CHECK2-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK2: .omp.final.done:
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@_Z3bari
// CHECK2-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8
// CHECK2-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK2-NEXT: store i32 0, i32* [[A]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK2-NEXT: [[CALL:%.*]] = call signext i32 @_Z3fooi(i32 signext [[TMP0]])
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
// CHECK2-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK2-NEXT: [[CALL1:%.*]] = call signext i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 8 dereferenceable(8) [[S]], i32 signext [[TMP2]])
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK2-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
// CHECK2-NEXT: store i32 [[ADD2]], i32* [[A]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK2-NEXT: [[CALL3:%.*]] = call signext i32 @_ZL7fstatici(i32 signext [[TMP4]])
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4
// CHECK2-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
// CHECK2-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK2-NEXT: [[CALL5:%.*]] = call signext i32 @_Z9ftemplateIiET_i(i32 signext [[TMP6]])
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK2-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
// CHECK2-NEXT: store i32 [[ADD6]], i32* [[A]], align 4
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK2-NEXT: ret i32 [[TMP8]]
//
//
// CHECK2-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
// CHECK2-SAME: (%struct.S1* nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 signext [[N:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK2-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[B:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK2-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8
// CHECK2-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8
// CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8
// CHECK2-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8
// CHECK2-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK2-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK2-NEXT: store i32 [[ADD]], i32* [[B]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK2-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK2-NEXT: [[TMP3:%.*]] = call i8* @llvm.stacksave()
// CHECK2-NEXT: store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
// CHECK2-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]]
// CHECK2-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2
// CHECK2-NEXT: store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[B]], align 4
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[B_CASTED]] to i32*
// CHECK2-NEXT: store i32 [[TMP5]], i32* [[CONV]], align 4
// CHECK2-NEXT: [[TMP6:%.*]] = load i64, i64* [[B_CASTED]], align 8
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP7]], 60
// CHECK2-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK2: omp_if.then:
// CHECK2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK2-NEXT: [[TMP8:%.*]] = mul nuw i64 2, [[TMP2]]
// CHECK2-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 2
// CHECK2-NEXT: [[TMP10:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to %struct.S1**
// CHECK2-NEXT: store %struct.S1* [[THIS1]], %struct.S1** [[TMP11]], align 8
// CHECK2-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double**
// CHECK2-NEXT: store double* [[A]], double** [[TMP13]], align 8
// CHECK2-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK2-NEXT: store i64 8, i64* [[TMP14]], align 8
// CHECK2-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK2-NEXT: store i8* null, i8** [[TMP15]], align 8
// CHECK2-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK2-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64*
// CHECK2-NEXT: store i64 [[TMP6]], i64* [[TMP17]], align 8
// CHECK2-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK2-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64*
// CHECK2-NEXT: store i64 [[TMP6]], i64* [[TMP19]], align 8
// CHECK2-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
// CHECK2-NEXT: store i64 4, i64* [[TMP20]], align 8
// CHECK2-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK2-NEXT: store i8* null, i8** [[TMP21]], align 8
// CHECK2-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK2-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64*
// CHECK2-NEXT: store i64 2, i64* [[TMP23]], align 8
// CHECK2-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK2-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64*
// CHECK2-NEXT: store i64 2, i64* [[TMP25]], align 8
// CHECK2-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
// CHECK2-NEXT: store i64 8, i64* [[TMP26]], align 8
// CHECK2-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK2-NEXT: store i8* null, i8** [[TMP27]], align 8
// CHECK2-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK2-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64*
// CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP29]], align 8
// CHECK2-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK2-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64*
// CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP31]], align 8
// CHECK2-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
// CHECK2-NEXT: store i64 8, i64* [[TMP32]], align 8
// CHECK2-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
// CHECK2-NEXT: store i8* null, i8** [[TMP33]], align 8
// CHECK2-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
// CHECK2-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16**
// CHECK2-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 8
// CHECK2-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
// CHECK2-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16**
// CHECK2-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 8
// CHECK2-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
// CHECK2-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 8
// CHECK2-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4
// CHECK2-NEXT: store i8* null, i8** [[TMP39]], align 8
// CHECK2-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK2-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK2-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0
// CHECK2-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK2: omp_offload.failed:
// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR4]]
// CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK2: omp_offload.cont:
// CHECK2-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK2: omp_if.else:
// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR4]]
// CHECK2-NEXT: br label [[OMP_IF_END]]
// CHECK2: omp_if.end:
// CHECK2-NEXT: [[TMP45:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP45]]
// CHECK2-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
// CHECK2-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2
// CHECK2-NEXT: [[CONV3:%.*]] = sext i16 [[TMP46]] to i32
// CHECK2-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4
// CHECK2-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP47]]
// CHECK2-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK2-NEXT: call void @llvm.stackrestore(i8* [[TMP48]])
// CHECK2-NEXT: ret i32 [[ADD4]]
//
//
// CHECK2-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK2-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK2-NEXT: [[AAA:%.*]] = alloca i8, align 1
// CHECK2-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK2-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[AAA_CASTED:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 8
// CHECK2-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 8
// CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 8
// CHECK2-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK2-NEXT: store i32 0, i32* [[A]], align 4
// CHECK2-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK2-NEXT: store i8 0, i8* [[AAA]], align 1
// CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK2-NEXT: store i32 [[TMP0]], i32* [[CONV]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK2-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
// CHECK2-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK2-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2
// CHECK2-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK2-NEXT: [[TMP4:%.*]] = load i8, i8* [[AAA]], align 1
// CHECK2-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
// CHECK2-NEXT: store i8 [[TMP4]], i8* [[CONV2]], align 1
// CHECK2-NEXT: [[TMP5:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 50
// CHECK2-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK2: omp_if.then:
// CHECK2-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64*
// CHECK2-NEXT: store i64 [[TMP1]], i64* [[TMP8]], align 8
// CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i64*
// CHECK2-NEXT: store i64 [[TMP1]], i64* [[TMP10]], align 8
// CHECK2-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK2-NEXT: store i8* null, i8** [[TMP11]], align 8
// CHECK2-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK2-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64*
// CHECK2-NEXT: store i64 [[TMP3]], i64* [[TMP13]], align 8
// CHECK2-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK2-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64*
// CHECK2-NEXT: store i64 [[TMP3]], i64* [[TMP15]], align 8
// CHECK2-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK2-NEXT: store i8* null, i8** [[TMP16]], align 8
// CHECK2-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK2-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64*
// CHECK2-NEXT: store i64 [[TMP5]], i64* [[TMP18]], align 8
// CHECK2-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK2-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64*
// CHECK2-NEXT: store i64 [[TMP5]], i64* [[TMP20]], align 8
// CHECK2-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK2-NEXT: store i8* null, i8** [[TMP21]], align 8
// CHECK2-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK2-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to [10 x i32]**
// CHECK2-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP23]], align 8
// CHECK2-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK2-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to [10 x i32]**
// CHECK2-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP25]], align 8
// CHECK2-NEXT: [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
// CHECK2-NEXT: store i8* null, i8** [[TMP26]], align 8
// CHECK2-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK2-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
// CHECK2-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK2: omp_offload.failed:
// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK2: omp_offload.cont:
// CHECK2-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK2: omp_if.else:
// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK2-NEXT: br label [[OMP_IF_END]]
// CHECK2: omp_if.end:
// CHECK2-NEXT: [[TMP31:%.*]] = load i32, i32* [[A]], align 4
// CHECK2-NEXT: ret i32 [[TMP31]]
//
//
// CHECK2-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK2-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK2-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK2-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK2-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
// CHECK2-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK2-NEXT: store i32 0, i32* [[A]], align 4
// CHECK2-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK2-NEXT: store i32 [[TMP0]], i32* [[CONV]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK2-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
// CHECK2-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK2-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2
// CHECK2-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40
// CHECK2-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK2: omp_if.then:
// CHECK2-NEXT: [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i64*
// CHECK2-NEXT: store i64 [[TMP1]], i64* [[TMP6]], align 8
// CHECK2-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64*
// CHECK2-NEXT: store i64 [[TMP1]], i64* [[TMP8]], align 8
// CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK2-NEXT: store i8* null, i8** [[TMP9]], align 8
// CHECK2-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK2-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i64*
// CHECK2-NEXT: store i64 [[TMP3]], i64* [[TMP11]], align 8
// CHECK2-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK2-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64*
// CHECK2-NEXT: store i64 [[TMP3]], i64* [[TMP13]], align 8
// CHECK2-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK2-NEXT: store i8* null, i8** [[TMP14]], align 8
// CHECK2-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK2-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to [10 x i32]**
// CHECK2-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP16]], align 8
// CHECK2-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK2-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [10 x i32]**
// CHECK2-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP18]], align 8
// CHECK2-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK2-NEXT: store i8* null, i8** [[TMP19]], align 8
// CHECK2-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.15, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK2-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
// CHECK2-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK2: omp_offload.failed:
// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178(i64 [[TMP1]], i64 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK2: omp_offload.cont:
// CHECK2-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK2: omp_if.else:
// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178(i64 [[TMP1]], i64 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK2-NEXT: br label [[OMP_IF_END]]
// CHECK2: omp_if.end:
// CHECK2-NEXT: [[TMP24:%.*]] = load i32, i32* [[A]], align 4
// CHECK2-NEXT: ret i32 [[TMP24]]
//
//
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216
// CHECK2-SAME: (%struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 8
// CHECK2-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8
// CHECK2-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK2-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8
// CHECK2-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK2-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK2-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
// CHECK2-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK2-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK2-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK2-NEXT: [[CONV3:%.*]] = bitcast i64* [[B_CASTED]] to i32*
// CHECK2-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..9
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR3]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 8
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[TMP:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK2-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8
// CHECK2-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK2-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK2-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
// CHECK2-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK2-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK2-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
// CHECK2-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
CHECK2-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8 3208 // CHECK2-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 3209 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 3210 // CHECK2-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 3211 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4 3212 // CHECK2-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) 3213 // CHECK2-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 3214 // CHECK2-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP6]], 3 3215 // CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 3216 // CHECK2: cond.true: 3217 // CHECK2-NEXT: br label [[COND_END:%.*]] 3218 // CHECK2: cond.false: 3219 // CHECK2-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 3220 // CHECK2-NEXT: br label [[COND_END]] 3221 // CHECK2: cond.end: 3222 // CHECK2-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] 3223 // CHECK2-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 3224 // CHECK2-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 3225 // CHECK2-NEXT: store i64 [[TMP8]], i64* [[DOTOMP_IV]], align 8 3226 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 3227 // CHECK2: omp.inner.for.cond: 3228 // CHECK2-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !38 3229 // CHECK2-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !38 3230 // CHECK2-NEXT: [[CMP3:%.*]] = icmp ule i64 [[TMP9]], [[TMP10]] 3231 // CHECK2-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 3232 // CHECK2: omp.inner.for.body: 3233 // CHECK2-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !38 3234 // CHECK2-NEXT: [[MUL:%.*]] = mul i64 [[TMP11]], 400 3235 // CHECK2-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] 3236 // CHECK2-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !38 3237 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !38 3238 // CHECK2-NEXT: [[CONV4:%.*]] = sitofp i32 [[TMP12]] to double 3239 // CHECK2-NEXT: [[ADD:%.*]] = fadd double [[CONV4]], 1.500000e+00 3240 // CHECK2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0 3241 // CHECK2-NEXT: store double [[ADD]], double* [[A]], align 8, !llvm.access.group !38 3242 // CHECK2-NEXT: [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0 3243 // CHECK2-NEXT: [[TMP13:%.*]] = load double, double* [[A5]], align 8, !llvm.access.group !38 3244 // CHECK2-NEXT: [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00 3245 // CHECK2-NEXT: store double [[INC]], double* [[A5]], align 8, !llvm.access.group !38 3246 // CHECK2-NEXT: [[CONV6:%.*]] = fptosi double [[INC]] to i16 3247 // CHECK2-NEXT: [[TMP14:%.*]] = mul nsw i64 1, [[TMP2]] 3248 // CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP14]] 3249 // CHECK2-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1 3250 // CHECK2-NEXT: store i16 [[CONV6]], i16* [[ARRAYIDX7]], align 2, !llvm.access.group !38 3251 // CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 3252 // CHECK2: omp.body.continue: 3253 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 3254 // CHECK2: omp.inner.for.inc: 3255 // CHECK2-NEXT: [[TMP15:%.*]] = 
load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !38 3256 // CHECK2-NEXT: [[ADD8:%.*]] = add i64 [[TMP15]], 1 3257 // CHECK2-NEXT: store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !38 3258 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP39:![0-9]+]] 3259 // CHECK2: omp.inner.for.end: 3260 // CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 3261 // CHECK2: omp.loop.exit: 3262 // CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]]) 3263 // CHECK2-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 3264 // CHECK2-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 3265 // CHECK2-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 3266 // CHECK2: .omp.final.then: 3267 // CHECK2-NEXT: store i64 400, i64* [[IT]], align 8 3268 // CHECK2-NEXT: br label [[DOTOMP_FINAL_DONE]] 3269 // CHECK2: .omp.final.done: 3270 // CHECK2-NEXT: ret void 3271 // 3272 // 3273 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195 3274 // CHECK2-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] { 3275 // CHECK2-NEXT: entry: 3276 // CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 3277 // CHECK2-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 3278 // CHECK2-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8 3279 // CHECK2-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8 3280 // CHECK2-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 3281 // CHECK2-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 3282 // CHECK2-NEXT: [[AAA_CASTED:%.*]] = alloca i64, align 8 3283 // CHECK2-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 3284 // CHECK2-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 3285 // CHECK2-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8 3286 // CHECK2-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8 3287 // CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 3288 // CHECK2-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 3289 // CHECK2-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8* 3290 // CHECK2-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8 3291 // CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8 3292 // CHECK2-NEXT: [[CONV3:%.*]] = bitcast i64* [[A_CASTED]] to i32* 3293 // CHECK2-NEXT: store i32 [[TMP1]], i32* [[CONV3]], align 4 3294 // CHECK2-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8 3295 // CHECK2-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 8 3296 // CHECK2-NEXT: [[CONV4:%.*]] = bitcast i64* [[AA_CASTED]] to i16* 3297 // CHECK2-NEXT: store i16 [[TMP3]], i16* [[CONV4]], align 2 3298 // CHECK2-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8 3299 // CHECK2-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV2]], align 8 3300 // CHECK2-NEXT: [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] to i8* 3301 // CHECK2-NEXT: store i8 [[TMP5]], i8* [[CONV5]], align 1 3302 // CHECK2-NEXT: [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8 3303 // CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]]) 3304 // CHECK2-NEXT: ret void 3305 // 3306 // 3307 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..11 3308 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { 3309 // CHECK2-NEXT: entry: 3310 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 3311 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 3312 // CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 3313 // CHECK2-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 3314 // CHECK2-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8 3315 // CHECK2-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8 3316 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 3317 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4 3318 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 3319 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 3320 // CHECK2-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 3321 // CHECK2-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 3322 // CHECK2-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8 3323 // CHECK2-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8 3324 // CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 3325 // CHECK2-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 3326 // CHECK2-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8* 3327 // CHECK2-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8 3328 // CHECK2-NEXT: ret void 3329 // 3330 // 3331 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178 3332 // CHECK2-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] { 3333 // CHECK2-NEXT: entry: 3334 // CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 3335 // CHECK2-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 3336 // CHECK2-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8 3337 // CHECK2-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 3338 // CHECK2-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 3339 // CHECK2-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 3340 // CHECK2-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 3341 // CHECK2-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8 3342 // CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 3343 // CHECK2-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 3344 // CHECK2-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8 3345 // CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8 3346 // CHECK2-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32* 3347 // CHECK2-NEXT: store i32 [[TMP1]], i32* [[CONV2]], align 4 3348 // CHECK2-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8 3349 // CHECK2-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 8 3350 // CHECK2-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* 3351 // CHECK2-NEXT: store i16 [[TMP3]], i16* [[CONV3]], align 2 3352 // CHECK2-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8 3353 // CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) 3354 // CHECK2-NEXT: ret void 3355 // 3356 // 3357 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..14 3358 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { 3359 // CHECK2-NEXT: entry: 3360 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 3361 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 3362 // CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 3363 // CHECK2-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 3364 // CHECK2-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8 3365 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 3366 // CHECK2-NEXT: [[TMP:%.*]] = alloca i64, align 8 3367 // CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 3368 // CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 3369 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 3370 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 3371 // CHECK2-NEXT: [[I:%.*]] = alloca i64, align 8 3372 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 3373 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 3374 // CHECK2-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 3375 // CHECK2-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 3376 // CHECK2-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8 3377 // CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 3378 // CHECK2-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 3379 // CHECK2-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8 3380 // CHECK2-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 3381 // CHECK2-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8 3382 // CHECK2-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 3383 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 3384 // CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 3385 // CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 3386 // CHECK2-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) 3387 // CHECK2-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 3388 // CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6 3389 // CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 3390 // CHECK2: cond.true: 3391 // CHECK2-NEXT: br label [[COND_END:%.*]] 3392 // CHECK2: cond.false: 3393 // CHECK2-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 3394 // CHECK2-NEXT: br label [[COND_END]] 3395 // CHECK2: cond.end: 3396 // CHECK2-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 3397 // CHECK2-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 3398 // CHECK2-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 3399 // CHECK2-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8 3400 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 3401 // CHECK2: omp.inner.for.cond: 3402 // CHECK2-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !41 3403 // CHECK2-NEXT: [[TMP7:%.*]] = load i64, i64* 
[[DOTOMP_UB]], align 8, !llvm.access.group !41 3404 // CHECK2-NEXT: [[CMP2:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]] 3405 // CHECK2-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 3406 // CHECK2: omp.inner.for.body: 3407 // CHECK2-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !41 3408 // CHECK2-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3 3409 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]] 3410 // CHECK2-NEXT: store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !41 3411 // CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !41 3412 // CHECK2-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1 3413 // CHECK2-NEXT: store i32 [[ADD3]], i32* [[CONV]], align 8, !llvm.access.group !41 3414 // CHECK2-NEXT: [[TMP10:%.*]] = load i16, i16* [[CONV1]], align 8, !llvm.access.group !41 3415 // CHECK2-NEXT: [[CONV4:%.*]] = sext i16 [[TMP10]] to i32 3416 // CHECK2-NEXT: [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1 3417 // CHECK2-NEXT: [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16 3418 // CHECK2-NEXT: store i16 [[CONV6]], i16* [[CONV1]], align 8, !llvm.access.group !41 3419 // CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2 3420 // CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !41 3421 // CHECK2-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP11]], 1 3422 // CHECK2-NEXT: store i32 [[ADD7]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !41 3423 // CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 3424 // CHECK2: omp.body.continue: 3425 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 3426 // CHECK2: omp.inner.for.inc: 3427 // CHECK2-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !41 3428 // CHECK2-NEXT: [[ADD8:%.*]] = add nsw i64 [[TMP12]], 1 3429 // CHECK2-NEXT: store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !41 3430 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP42:![0-9]+]] 3431 // CHECK2: omp.inner.for.end: 3432 // CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 3433 // CHECK2: omp.loop.exit: 3434 // CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 3435 // CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 3436 // CHECK2-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 3437 // CHECK2-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 3438 // CHECK2: .omp.final.then: 3439 // CHECK2-NEXT: store i64 11, i64* [[I]], align 8 3440 // CHECK2-NEXT: br label [[DOTOMP_FINAL_DONE]] 3441 // CHECK2: .omp.final.done: 3442 // CHECK2-NEXT: ret void 3443 // 3444 // 3445 // CHECK2-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg 3446 // CHECK2-SAME: () #[[ATTR7:[0-9]+]] { 3447 // CHECK2-NEXT: entry: 3448 // CHECK2-NEXT: call void @__tgt_register_requires(i64 1) 3449 // CHECK2-NEXT: ret void 3450 // 3451 // 3452 // CHECK3-LABEL: define {{[^@]+}}@_Z7get_valv 3453 // CHECK3-SAME: () #[[ATTR0:[0-9]+]] { 3454 // CHECK3-NEXT: entry: 3455 // CHECK3-NEXT: ret i64 0 3456 // 3457 // 3458 // CHECK3-LABEL: define {{[^@]+}}@_Z3fooi 3459 // CHECK3-SAME: (i32 [[N:%.*]]) #[[ATTR0]] { 3460 // CHECK3-NEXT: entry: 3461 // CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 3462 // CHECK3-NEXT: [[A:%.*]] = alloca i32, align 4 3463 // CHECK3-NEXT: [[AA:%.*]] = alloca i16, align 2 3464 // CHECK3-NEXT: [[B:%.*]] = alloca [10 x float], align 4 3465 // CHECK3-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, 
align 4 3466 // CHECK3-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4 3467 // CHECK3-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8 3468 // CHECK3-NEXT: [[__VLA_EXPR1:%.*]] = alloca i32, align 4 3469 // CHECK3-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4 3470 // CHECK3-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1 3471 // CHECK3-NEXT: [[K:%.*]] = alloca i64, align 8 3472 // CHECK3-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 3473 // CHECK3-NEXT: [[LIN:%.*]] = alloca i32, align 4 3474 // CHECK3-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 3475 // CHECK3-NEXT: [[LIN_CASTED:%.*]] = alloca i32, align 4 3476 // CHECK3-NEXT: [[A_CASTED2:%.*]] = alloca i32, align 4 3477 // CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 3478 // CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 3479 // CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 3480 // CHECK3-NEXT: [[A_CASTED3:%.*]] = alloca i32, align 4 3481 // CHECK3-NEXT: [[AA_CASTED4:%.*]] = alloca i32, align 4 3482 // CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS6:%.*]] = alloca [2 x i8*], align 4 3483 // CHECK3-NEXT: [[DOTOFFLOAD_PTRS7:%.*]] = alloca [2 x i8*], align 4 3484 // CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS8:%.*]] = alloca [2 x i8*], align 4 3485 // CHECK3-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 3486 // CHECK3-NEXT: [[A_CASTED11:%.*]] = alloca i32, align 4 3487 // CHECK3-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 3488 // CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS14:%.*]] = alloca [10 x i8*], align 4 3489 // CHECK3-NEXT: [[DOTOFFLOAD_PTRS15:%.*]] = alloca [10 x i8*], align 4 3490 // CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS16:%.*]] = alloca [10 x i8*], align 4 3491 // CHECK3-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 4 3492 // CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) 3493 // CHECK3-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 3494 // CHECK3-NEXT: store i32 0, i32* [[A]], align 4 3495 // CHECK3-NEXT: store i16 0, i16* [[AA]], align 2 3496 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 3497 // CHECK3-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave() 3498 // CHECK3-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4 3499 // CHECK3-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP1]], align 4 3500 // CHECK3-NEXT: store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4 3501 // CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4 3502 // CHECK3-NEXT: [[TMP4:%.*]] = mul nuw i32 5, [[TMP3]] 3503 // CHECK3-NEXT: [[VLA1:%.*]] = alloca double, i32 [[TMP4]], align 8 3504 // CHECK3-NEXT: store i32 [[TMP3]], i32* [[__VLA_EXPR1]], align 4 3505 // CHECK3-NEXT: [[TMP5:%.*]] = call i8* @__kmpc_omp_target_task_alloc(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i32 20, i32 1, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. 
to i32 (i32, i8*)*), i64 -1) 3506 // CHECK3-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP5]] to %struct.kmp_task_t_with_privates* 3507 // CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP6]], i32 0, i32 0 3508 // CHECK3-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i8* [[TMP5]]) 3509 // CHECK3-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv() 3510 // CHECK3-NEXT: store i64 [[CALL]], i64* [[K]], align 8 3511 // CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[A]], align 4 3512 // CHECK3-NEXT: store i32 [[TMP9]], i32* [[A_CASTED]], align 4 3513 // CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[A_CASTED]], align 4 3514 // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101(i32 [[TMP10]], i64* [[K]]) #[[ATTR4:[0-9]+]] 3515 // CHECK3-NEXT: store i32 12, i32* [[LIN]], align 4 3516 // CHECK3-NEXT: [[TMP11:%.*]] = load i16, i16* [[AA]], align 2 3517 // CHECK3-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16* 3518 // CHECK3-NEXT: store i16 [[TMP11]], i16* [[CONV]], align 2 3519 // CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[AA_CASTED]], align 4 3520 // CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[LIN]], align 4 3521 // CHECK3-NEXT: store i32 [[TMP13]], i32* [[LIN_CASTED]], align 4 3522 // CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[LIN_CASTED]], align 4 3523 // CHECK3-NEXT: [[TMP15:%.*]] = load i32, i32* [[A]], align 4 3524 // CHECK3-NEXT: store i32 [[TMP15]], i32* [[A_CASTED2]], align 4 3525 // CHECK3-NEXT: [[TMP16:%.*]] = load i32, i32* [[A_CASTED2]], align 4 3526 // CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 3527 // CHECK3-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32* 3528 // CHECK3-NEXT: store i32 [[TMP12]], i32* [[TMP18]], align 4 3529 // CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 3530 // CHECK3-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32* 3531 // CHECK3-NEXT: store i32 [[TMP12]], i32* [[TMP20]], align 4 3532 // CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 3533 // CHECK3-NEXT: store i8* null, i8** [[TMP21]], align 4 3534 // CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 3535 // CHECK3-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32* 3536 // CHECK3-NEXT: store i32 [[TMP14]], i32* [[TMP23]], align 4 3537 // CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 3538 // CHECK3-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32* 3539 // CHECK3-NEXT: store i32 [[TMP14]], i32* [[TMP25]], align 4 3540 // CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 3541 // CHECK3-NEXT: store i8* null, i8** [[TMP26]], align 4 3542 // CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 3543 // CHECK3-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32* 3544 // CHECK3-NEXT: store i32 [[TMP16]], i32* [[TMP28]], align 4 3545 // CHECK3-NEXT: [[TMP29:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 3546 // CHECK3-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i32* 3547 // CHECK3-NEXT: store i32 [[TMP16]], i32* [[TMP30]], align 4 3548 // CHECK3-NEXT: [[TMP31:%.*]] = getelementptr inbounds 
[3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 3549 // CHECK3-NEXT: store i8* null, i8** [[TMP31]], align 4 3550 // CHECK3-NEXT: [[TMP32:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 3551 // CHECK3-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 3552 // CHECK3-NEXT: [[TMP34:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108.region_id, i32 3, i8** [[TMP32]], i8** [[TMP33]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) 3553 // CHECK3-NEXT: [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0 3554 // CHECK3-NEXT: br i1 [[TMP35]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 3555 // CHECK3: omp_offload.failed: 3556 // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108(i32 [[TMP12]], i32 [[TMP14]], i32 [[TMP16]]) #[[ATTR4]] 3557 // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] 3558 // CHECK3: omp_offload.cont: 3559 // CHECK3-NEXT: [[TMP36:%.*]] = load i32, i32* [[A]], align 4 3560 // CHECK3-NEXT: store i32 [[TMP36]], i32* [[A_CASTED3]], align 4 3561 // CHECK3-NEXT: [[TMP37:%.*]] = load i32, i32* [[A_CASTED3]], align 4 3562 // CHECK3-NEXT: [[TMP38:%.*]] = load i16, i16* [[AA]], align 2 3563 // CHECK3-NEXT: [[CONV5:%.*]] = bitcast i32* [[AA_CASTED4]] to i16* 3564 // CHECK3-NEXT: store i16 [[TMP38]], i16* [[CONV5]], align 2 3565 // CHECK3-NEXT: [[TMP39:%.*]] = load i32, i32* [[AA_CASTED4]], align 4 3566 // CHECK3-NEXT: [[TMP40:%.*]] = load i32, i32* [[N_ADDR]], align 4 3567 // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP40]], 10 3568 // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 3569 // CHECK3: omp_if.then: 3570 // CHECK3-NEXT: [[TMP41:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 0 3571 // CHECK3-NEXT: [[TMP42:%.*]] = bitcast i8** [[TMP41]] to i32* 3572 // CHECK3-NEXT: store i32 [[TMP37]], i32* [[TMP42]], align 4 3573 // CHECK3-NEXT: [[TMP43:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS7]], i32 0, i32 0 3574 // CHECK3-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32* 3575 // CHECK3-NEXT: store i32 [[TMP37]], i32* [[TMP44]], align 4 3576 // CHECK3-NEXT: [[TMP45:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS8]], i32 0, i32 0 3577 // CHECK3-NEXT: store i8* null, i8** [[TMP45]], align 4 3578 // CHECK3-NEXT: [[TMP46:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 1 3579 // CHECK3-NEXT: [[TMP47:%.*]] = bitcast i8** [[TMP46]] to i32* 3580 // CHECK3-NEXT: store i32 [[TMP39]], i32* [[TMP47]], align 4 3581 // CHECK3-NEXT: [[TMP48:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS7]], i32 0, i32 1 3582 // CHECK3-NEXT: [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32* 3583 // CHECK3-NEXT: store i32 [[TMP39]], i32* [[TMP49]], align 4 3584 // CHECK3-NEXT: [[TMP50:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS8]], i32 0, i32 1 3585 // CHECK3-NEXT: store i8* null, i8** [[TMP50]], align 4 3586 // CHECK3-NEXT: [[TMP51:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 0 3587 // CHECK3-NEXT: [[TMP52:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS7]], 
i32 0, i32 0 3588 // CHECK3-NEXT: [[TMP53:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116.region_id, i32 2, i8** [[TMP51]], i8** [[TMP52]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) 3589 // CHECK3-NEXT: [[TMP54:%.*]] = icmp ne i32 [[TMP53]], 0 3590 // CHECK3-NEXT: br i1 [[TMP54]], label [[OMP_OFFLOAD_FAILED9:%.*]], label [[OMP_OFFLOAD_CONT10:%.*]] 3591 // CHECK3: omp_offload.failed9: 3592 // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116(i32 [[TMP37]], i32 [[TMP39]]) #[[ATTR4]] 3593 // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT10]] 3594 // CHECK3: omp_offload.cont10: 3595 // CHECK3-NEXT: br label [[OMP_IF_END:%.*]] 3596 // CHECK3: omp_if.else: 3597 // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116(i32 [[TMP37]], i32 [[TMP39]]) #[[ATTR4]] 3598 // CHECK3-NEXT: br label [[OMP_IF_END]] 3599 // CHECK3: omp_if.end: 3600 // CHECK3-NEXT: [[TMP55:%.*]] = load i32, i32* [[A]], align 4 3601 // CHECK3-NEXT: store i32 [[TMP55]], i32* [[DOTCAPTURE_EXPR_]], align 4 3602 // CHECK3-NEXT: [[TMP56:%.*]] = load i32, i32* [[A]], align 4 3603 // CHECK3-NEXT: store i32 [[TMP56]], i32* [[A_CASTED11]], align 4 3604 // CHECK3-NEXT: [[TMP57:%.*]] = load i32, i32* [[A_CASTED11]], align 4 3605 // CHECK3-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 3606 // CHECK3-NEXT: store i32 [[TMP58]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 3607 // CHECK3-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 3608 // CHECK3-NEXT: [[TMP60:%.*]] = load i32, i32* [[N_ADDR]], align 4 3609 // CHECK3-NEXT: [[CMP12:%.*]] = icmp sgt i32 [[TMP60]], 20 3610 // CHECK3-NEXT: br i1 [[CMP12]], label [[OMP_IF_THEN13:%.*]], label [[OMP_IF_ELSE19:%.*]] 3611 // CHECK3: omp_if.then13: 3612 // CHECK3-NEXT: [[TMP61:%.*]] = mul nuw i32 [[TMP1]], 4 3613 // CHECK3-NEXT: [[TMP62:%.*]] = sext i32 [[TMP61]] to i64 3614 // CHECK3-NEXT: [[TMP63:%.*]] = mul nuw i32 5, [[TMP3]] 3615 // CHECK3-NEXT: [[TMP64:%.*]] = mul nuw i32 [[TMP63]], 8 3616 // CHECK3-NEXT: [[TMP65:%.*]] = sext i32 [[TMP64]] to i64 3617 // CHECK3-NEXT: [[TMP66:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 0 3618 // CHECK3-NEXT: [[TMP67:%.*]] = bitcast i8** [[TMP66]] to i32* 3619 // CHECK3-NEXT: store i32 [[TMP57]], i32* [[TMP67]], align 4 3620 // CHECK3-NEXT: [[TMP68:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0 3621 // CHECK3-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i32* 3622 // CHECK3-NEXT: store i32 [[TMP57]], i32* [[TMP69]], align 4 3623 // CHECK3-NEXT: [[TMP70:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 3624 // CHECK3-NEXT: store i64 4, i64* [[TMP70]], align 4 3625 // CHECK3-NEXT: [[TMP71:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 0 3626 // CHECK3-NEXT: store i8* null, i8** [[TMP71]], align 4 3627 // CHECK3-NEXT: [[TMP72:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 1 3628 // CHECK3-NEXT: [[TMP73:%.*]] = bitcast i8** [[TMP72]] to [10 x float]** 3629 // CHECK3-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP73]], align 4 3630 // CHECK3-NEXT: [[TMP74:%.*]] = getelementptr inbounds [10 x i8*], 
[10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 1 3631 // CHECK3-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to [10 x float]** 3632 // CHECK3-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP75]], align 4 3633 // CHECK3-NEXT: [[TMP76:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 3634 // CHECK3-NEXT: store i64 40, i64* [[TMP76]], align 4 3635 // CHECK3-NEXT: [[TMP77:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 1 3636 // CHECK3-NEXT: store i8* null, i8** [[TMP77]], align 4 3637 // CHECK3-NEXT: [[TMP78:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 2 3638 // CHECK3-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32* 3639 // CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP79]], align 4 3640 // CHECK3-NEXT: [[TMP80:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 2 3641 // CHECK3-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i32* 3642 // CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP81]], align 4 3643 // CHECK3-NEXT: [[TMP82:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 3644 // CHECK3-NEXT: store i64 4, i64* [[TMP82]], align 4 3645 // CHECK3-NEXT: [[TMP83:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 2 3646 // CHECK3-NEXT: store i8* null, i8** [[TMP83]], align 4 3647 // CHECK3-NEXT: [[TMP84:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 3 3648 // CHECK3-NEXT: [[TMP85:%.*]] = bitcast i8** [[TMP84]] to float** 3649 // CHECK3-NEXT: store float* [[VLA]], float** [[TMP85]], align 4 3650 // CHECK3-NEXT: [[TMP86:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 3 3651 // CHECK3-NEXT: [[TMP87:%.*]] = bitcast i8** [[TMP86]] to float** 3652 // CHECK3-NEXT: store float* [[VLA]], float** [[TMP87]], align 4 3653 // CHECK3-NEXT: [[TMP88:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 3654 // CHECK3-NEXT: store i64 [[TMP62]], i64* [[TMP88]], align 4 3655 // CHECK3-NEXT: [[TMP89:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 3 3656 // CHECK3-NEXT: store i8* null, i8** [[TMP89]], align 4 3657 // CHECK3-NEXT: [[TMP90:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 4 3658 // CHECK3-NEXT: [[TMP91:%.*]] = bitcast i8** [[TMP90]] to [5 x [10 x double]]** 3659 // CHECK3-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP91]], align 4 3660 // CHECK3-NEXT: [[TMP92:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 4 3661 // CHECK3-NEXT: [[TMP93:%.*]] = bitcast i8** [[TMP92]] to [5 x [10 x double]]** 3662 // CHECK3-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP93]], align 4 3663 // CHECK3-NEXT: [[TMP94:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 3664 // CHECK3-NEXT: store i64 400, i64* [[TMP94]], align 4 3665 // CHECK3-NEXT: [[TMP95:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 4 3666 // CHECK3-NEXT: store i8* null, i8** [[TMP95]], align 4 3667 // CHECK3-NEXT: [[TMP96:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 5 3668 // CHECK3-NEXT: [[TMP97:%.*]] = bitcast i8** [[TMP96]] to i32* 3669 // CHECK3-NEXT: store i32 5, i32* [[TMP97]], align 4 
3670 // CHECK3-NEXT: [[TMP98:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 5 3671 // CHECK3-NEXT: [[TMP99:%.*]] = bitcast i8** [[TMP98]] to i32* 3672 // CHECK3-NEXT: store i32 5, i32* [[TMP99]], align 4 3673 // CHECK3-NEXT: [[TMP100:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 3674 // CHECK3-NEXT: store i64 4, i64* [[TMP100]], align 4 3675 // CHECK3-NEXT: [[TMP101:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 5 3676 // CHECK3-NEXT: store i8* null, i8** [[TMP101]], align 4 3677 // CHECK3-NEXT: [[TMP102:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 6 3678 // CHECK3-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to i32* 3679 // CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP103]], align 4 3680 // CHECK3-NEXT: [[TMP104:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 6 3681 // CHECK3-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to i32* 3682 // CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP105]], align 4 3683 // CHECK3-NEXT: [[TMP106:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 3684 // CHECK3-NEXT: store i64 4, i64* [[TMP106]], align 4 3685 // CHECK3-NEXT: [[TMP107:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 6 3686 // CHECK3-NEXT: store i8* null, i8** [[TMP107]], align 4 3687 // CHECK3-NEXT: [[TMP108:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 7 3688 // CHECK3-NEXT: [[TMP109:%.*]] = bitcast i8** [[TMP108]] to double** 3689 // CHECK3-NEXT: store double* [[VLA1]], double** [[TMP109]], align 4 3690 // CHECK3-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 7 3691 // CHECK3-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to double** 3692 // CHECK3-NEXT: store double* [[VLA1]], double** [[TMP111]], align 4 3693 // CHECK3-NEXT: [[TMP112:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 3694 // CHECK3-NEXT: store i64 [[TMP65]], i64* [[TMP112]], align 4 3695 // CHECK3-NEXT: [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 7 3696 // CHECK3-NEXT: store i8* null, i8** [[TMP113]], align 4 3697 // CHECK3-NEXT: [[TMP114:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 8 3698 // CHECK3-NEXT: [[TMP115:%.*]] = bitcast i8** [[TMP114]] to %struct.TT** 3699 // CHECK3-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP115]], align 4 3700 // CHECK3-NEXT: [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 8 3701 // CHECK3-NEXT: [[TMP117:%.*]] = bitcast i8** [[TMP116]] to %struct.TT** 3702 // CHECK3-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP117]], align 4 3703 // CHECK3-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 3704 // CHECK3-NEXT: store i64 12, i64* [[TMP118]], align 4 3705 // CHECK3-NEXT: [[TMP119:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 8 3706 // CHECK3-NEXT: store i8* null, i8** [[TMP119]], align 4 3707 // CHECK3-NEXT: [[TMP120:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 9 3708 // CHECK3-NEXT: [[TMP121:%.*]] = bitcast i8** [[TMP120]] to i32* 3709 // CHECK3-NEXT: 
store i32 [[TMP59]], i32* [[TMP121]], align 4 3710 // CHECK3-NEXT: [[TMP122:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 9 3711 // CHECK3-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to i32* 3712 // CHECK3-NEXT: store i32 [[TMP59]], i32* [[TMP123]], align 4 3713 // CHECK3-NEXT: [[TMP124:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 9 3714 // CHECK3-NEXT: store i64 4, i64* [[TMP124]], align 4 3715 // CHECK3-NEXT: [[TMP125:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 9 3716 // CHECK3-NEXT: store i8* null, i8** [[TMP125]], align 4 3717 // CHECK3-NEXT: [[TMP126:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 0 3718 // CHECK3-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0 3719 // CHECK3-NEXT: [[TMP128:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 3720 // CHECK3-NEXT: [[TMP129:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140.region_id, i32 10, i8** [[TMP126]], i8** [[TMP127]], i64* [[TMP128]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) 3721 // CHECK3-NEXT: [[TMP130:%.*]] = icmp ne i32 [[TMP129]], 0 3722 // CHECK3-NEXT: br i1 [[TMP130]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]] 3723 // CHECK3: omp_offload.failed17: 3724 // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i32 [[TMP57]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP59]]) #[[ATTR4]] 3725 // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT18]] 3726 // CHECK3: omp_offload.cont18: 3727 // CHECK3-NEXT: br label [[OMP_IF_END20:%.*]] 3728 // CHECK3: omp_if.else19: 3729 // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i32 [[TMP57]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP59]]) #[[ATTR4]] 3730 // CHECK3-NEXT: br label [[OMP_IF_END20]] 3731 // CHECK3: omp_if.end20: 3732 // CHECK3-NEXT: [[TMP131:%.*]] = load i32, i32* [[A]], align 4 3733 // CHECK3-NEXT: [[TMP132:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 3734 // CHECK3-NEXT: call void @llvm.stackrestore(i8* [[TMP132]]) 3735 // CHECK3-NEXT: ret i32 [[TMP131]] 3736 // 3737 // 3738 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96 3739 // CHECK3-SAME: () #[[ATTR2:[0-9]+]] { 3740 // CHECK3-NEXT: entry: 3741 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*)) 3742 // CHECK3-NEXT: ret void 3743 // 3744 // 3745 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined. 
3746 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR3:[0-9]+]] { 3747 // CHECK3-NEXT: entry: 3748 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 3749 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 3750 // CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 3751 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 3752 // CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 3753 // CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 3754 // CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 3755 // CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 3756 // CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 3757 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 3758 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 3759 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 3760 // CHECK3-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4 3761 // CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 3762 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 3763 // CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 3764 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 3765 // CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 3766 // CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 3767 // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5 3768 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 3769 // CHECK3: cond.true: 3770 // CHECK3-NEXT: br label [[COND_END:%.*]] 3771 // CHECK3: cond.false: 3772 // CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 3773 // CHECK3-NEXT: br label [[COND_END]] 3774 // CHECK3: cond.end: 3775 // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 3776 // CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 3777 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 3778 // CHECK3-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 3779 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 3780 // CHECK3: omp.inner.for.cond: 3781 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11 3782 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !11 3783 // CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 3784 // CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 3785 // CHECK3: omp.inner.for.body: 3786 // CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11 3787 // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5 3788 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]] 3789 // CHECK3-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !11 3790 // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 3791 // CHECK3: omp.body.continue: 3792 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 3793 // CHECK3: omp.inner.for.inc: 3794 // CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11 3795 // CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 3796 // CHECK3-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11 3797 // CHECK3-NEXT: br label 
[[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]] 3798 // CHECK3: omp.inner.for.end: 3799 // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 3800 // CHECK3: omp.loop.exit: 3801 // CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 3802 // CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 3803 // CHECK3-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0 3804 // CHECK3-NEXT: br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 3805 // CHECK3: .omp.final.then: 3806 // CHECK3-NEXT: store i32 33, i32* [[I]], align 4 3807 // CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]] 3808 // CHECK3: .omp.final.done: 3809 // CHECK3-NEXT: ret void 3810 // 3811 // 3812 // CHECK3-LABEL: define {{[^@]+}}@.omp_task_entry. 3813 // CHECK3-SAME: (i32 [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias [[TMP1:%.*]]) #[[ATTR5:[0-9]+]] { 3814 // CHECK3-NEXT: entry: 3815 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4 3816 // CHECK3-NEXT: [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 4 3817 // CHECK3-NEXT: [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 4 3818 // CHECK3-NEXT: [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 4 3819 // CHECK3-NEXT: [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 4 3820 // CHECK3-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 4 3821 // CHECK3-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4 3822 // CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 4 3823 // CHECK3-NEXT: store i32 [[TMP0]], i32* [[DOTADDR]], align 4 3824 // CHECK3-NEXT: store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 4 3825 // CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4 3826 // CHECK3-NEXT: [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 4 3827 // CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0 3828 // CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2 3829 // CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0 3830 // CHECK3-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4 3831 // CHECK3-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon* 3832 // CHECK3-NEXT: [[TMP9:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8* 3833 // CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META17:![0-9]+]]) 3834 // CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META20:![0-9]+]]) 3835 // CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META22:![0-9]+]]) 3836 // CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META24:![0-9]+]]) 3837 // CHECK3-NEXT: store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !26 3838 // CHECK3-NEXT: store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 4, !noalias !26 3839 // CHECK3-NEXT: store i8* null, i8** [[DOTPRIVATES__ADDR_I]], align 4, !noalias !26 3840 // CHECK3-NEXT: store void (i8*, ...)* null, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !26 3841 // CHECK3-NEXT: store i8* [[TMP9]], i8** [[DOTTASK_T__ADDR_I]], align 4, !noalias !26 3842 // CHECK3-NEXT: store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 4, !noalias 
!26 3843 // CHECK3-NEXT: [[TMP10:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 4, !noalias !26 3844 // CHECK3-NEXT: [[TMP11:%.*]] = call i32 @__tgt_target_teams_nowait_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 1, i32 0, i32 0, i8* null, i32 0, i8* null) #[[ATTR4]] 3845 // CHECK3-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 3846 // CHECK3-NEXT: br i1 [[TMP12]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]] 3847 // CHECK3: omp_offload.failed.i: 3848 // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96() #[[ATTR4]] 3849 // CHECK3-NEXT: br label [[DOTOMP_OUTLINED__1_EXIT]] 3850 // CHECK3: .omp_outlined..1.exit: 3851 // CHECK3-NEXT: ret i32 0 3852 // 3853 // 3854 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101 3855 // CHECK3-SAME: (i32 [[A:%.*]], i64* nonnull align 4 dereferenceable(8) [[K:%.*]]) #[[ATTR3]] { 3856 // CHECK3-NEXT: entry: 3857 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 3858 // CHECK3-NEXT: [[K_ADDR:%.*]] = alloca i64*, align 4 3859 // CHECK3-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 3860 // CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 3861 // CHECK3-NEXT: store i64* [[K]], i64** [[K_ADDR]], align 4 3862 // CHECK3-NEXT: [[TMP0:%.*]] = load i64*, i64** [[K_ADDR]], align 4 3863 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4 3864 // CHECK3-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4 3865 // CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4 3866 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i64*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP2]], i64* [[TMP0]]) 3867 // CHECK3-NEXT: ret void 3868 // 3869 // 3870 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..2 3871 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i64* nonnull align 4 dereferenceable(8) [[K:%.*]]) #[[ATTR3]] { 3872 // CHECK3-NEXT: entry: 3873 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 3874 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 3875 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 3876 // CHECK3-NEXT: [[K_ADDR:%.*]] = alloca i64*, align 4 3877 // CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 3878 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 3879 // CHECK3-NEXT: [[DOTLINEAR_START:%.*]] = alloca i64, align 8 3880 // CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 3881 // CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 3882 // CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 3883 // CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 3884 // CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 3885 // CHECK3-NEXT: [[K1:%.*]] = alloca i64, align 8 3886 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 3887 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 3888 // CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 3889 // CHECK3-NEXT: store i64* [[K]], i64** [[K_ADDR]], align 4 3890 // CHECK3-NEXT: [[TMP0:%.*]] = load i64*, i64** [[K_ADDR]], align 4 3891 // CHECK3-NEXT: [[TMP1:%.*]] = load i64, i64* [[TMP0]], align 8 3892 // CHECK3-NEXT: store i64 [[TMP1]], i64* [[DOTLINEAR_START]], align 8 3893 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 3894 // CHECK3-NEXT: store i32 8, i32* [[DOTOMP_UB]], align 4 3895 // CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 3896 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 3897 // CHECK3-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 3898 // CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 3899 // CHECK3-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP3]]) 3900 // CHECK3-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 35, i32 0, i32 8, i32 1, i32 1) 3901 // CHECK3-NEXT: br label [[OMP_DISPATCH_COND:%.*]] 3902 // CHECK3: omp.dispatch.cond: 3903 // CHECK3-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]]) 3904 // CHECK3-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP4]], 0 3905 // CHECK3-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] 3906 // CHECK3: omp.dispatch.body: 3907 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 3908 // CHECK3-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4 3909 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 3910 // CHECK3: omp.inner.for.cond: 3911 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27 3912 // CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !27 3913 // CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 3914 // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label 
[[OMP_INNER_FOR_END:%.*]] 3915 // CHECK3: omp.inner.for.body: 3916 // CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27 3917 // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 3918 // CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[MUL]] 3919 // CHECK3-NEXT: store i32 [[SUB]], i32* [[I]], align 4, !llvm.access.group !27 3920 // CHECK3-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !27 3921 // CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27 3922 // CHECK3-NEXT: [[MUL2:%.*]] = mul nsw i32 [[TMP10]], 3 3923 // CHECK3-NEXT: [[CONV:%.*]] = sext i32 [[MUL2]] to i64 3924 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP9]], [[CONV]] 3925 // CHECK3-NEXT: store i64 [[ADD]], i64* [[K1]], align 8, !llvm.access.group !27 3926 // CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !27 3927 // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP11]], 1 3928 // CHECK3-NEXT: store i32 [[ADD3]], i32* [[A_ADDR]], align 4, !llvm.access.group !27 3929 // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 3930 // CHECK3: omp.body.continue: 3931 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 3932 // CHECK3: omp.inner.for.inc: 3933 // CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27 3934 // CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1 3935 // CHECK3-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27 3936 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]] 3937 // CHECK3: omp.inner.for.end: 3938 // CHECK3-NEXT: br label [[OMP_DISPATCH_INC:%.*]] 3939 // CHECK3: omp.dispatch.inc: 3940 // CHECK3-NEXT: br label [[OMP_DISPATCH_COND]] 3941 // CHECK3: omp.dispatch.end: 3942 // CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 3943 // CHECK3-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 3944 // CHECK3-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 3945 // CHECK3: .omp.final.then: 3946 // CHECK3-NEXT: store i32 1, i32* [[I]], align 4 3947 // CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]] 3948 // CHECK3: .omp.final.done: 3949 // CHECK3-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 3950 // CHECK3-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0 3951 // CHECK3-NEXT: br i1 [[TMP16]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]] 3952 // CHECK3: .omp.linear.pu: 3953 // CHECK3-NEXT: [[TMP17:%.*]] = load i64, i64* [[K1]], align 8 3954 // CHECK3-NEXT: store i64 [[TMP17]], i64* [[TMP0]], align 8 3955 // CHECK3-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] 3956 // CHECK3: .omp.linear.pu.done: 3957 // CHECK3-NEXT: ret void 3958 // 3959 // 3960 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108 3961 // CHECK3-SAME: (i32 [[AA:%.*]], i32 [[LIN:%.*]], i32 [[A:%.*]]) #[[ATTR2]] { 3962 // CHECK3-NEXT: entry: 3963 // CHECK3-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 3964 // CHECK3-NEXT: [[LIN_ADDR:%.*]] = alloca i32, align 4 3965 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 3966 // CHECK3-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 3967 // CHECK3-NEXT: [[LIN_CASTED:%.*]] = alloca i32, align 4 3968 // CHECK3-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 3969 // CHECK3-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 3970 // CHECK3-NEXT: store i32 [[LIN]], i32* [[LIN_ADDR]], align 4 3971 // CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 3972 // 
CHECK3-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* 3973 // CHECK3-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 4 3974 // CHECK3-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* 3975 // CHECK3-NEXT: store i16 [[TMP0]], i16* [[CONV1]], align 2 3976 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[AA_CASTED]], align 4 3977 // CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[LIN_ADDR]], align 4 3978 // CHECK3-NEXT: store i32 [[TMP2]], i32* [[LIN_CASTED]], align 4 3979 // CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[LIN_CASTED]], align 4 3980 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[A_ADDR]], align 4 3981 // CHECK3-NEXT: store i32 [[TMP4]], i32* [[A_CASTED]], align 4 3982 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[A_CASTED]], align 4 3983 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]]) 3984 // CHECK3-NEXT: ret void 3985 // 3986 // 3987 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..3 3988 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[AA:%.*]], i32 [[LIN:%.*]], i32 [[A:%.*]]) #[[ATTR3]] { 3989 // CHECK3-NEXT: entry: 3990 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 3991 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 3992 // CHECK3-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 3993 // CHECK3-NEXT: [[LIN_ADDR:%.*]] = alloca i32, align 4 3994 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 3995 // CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 3996 // CHECK3-NEXT: [[TMP:%.*]] = alloca i64, align 4 3997 // CHECK3-NEXT: [[DOTLINEAR_START:%.*]] = alloca i32, align 4 3998 // CHECK3-NEXT: [[DOTLINEAR_START1:%.*]] = alloca i32, align 4 3999 // CHECK3-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8 4000 // CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 4001 // CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 4002 // CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 4003 // CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 4004 // CHECK3-NEXT: [[IT:%.*]] = alloca i64, align 8 4005 // CHECK3-NEXT: [[LIN2:%.*]] = alloca i32, align 4 4006 // CHECK3-NEXT: [[A3:%.*]] = alloca i32, align 4 4007 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 4008 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 4009 // CHECK3-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 4010 // CHECK3-NEXT: store i32 [[LIN]], i32* [[LIN_ADDR]], align 4 4011 // CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 4012 // CHECK3-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* 4013 // CHECK3-NEXT: [[TMP0:%.*]] = load i32, i32* [[LIN_ADDR]], align 4 4014 // CHECK3-NEXT: store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4 4015 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4 4016 // CHECK3-NEXT: store i32 [[TMP1]], i32* [[DOTLINEAR_START1]], align 4 4017 // CHECK3-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv() 4018 // CHECK3-NEXT: store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8 4019 // CHECK3-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 4020 // CHECK3-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8 4021 // CHECK3-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 4022 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 4023 // 
CHECK3-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 4024 // CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 4025 // CHECK3-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3]], i32 [[TMP3]]) 4026 // CHECK3-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) 4027 // CHECK3-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 4028 // CHECK3-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3 4029 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 4030 // CHECK3: cond.true: 4031 // CHECK3-NEXT: br label [[COND_END:%.*]] 4032 // CHECK3: cond.false: 4033 // CHECK3-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 4034 // CHECK3-NEXT: br label [[COND_END]] 4035 // CHECK3: cond.end: 4036 // CHECK3-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 4037 // CHECK3-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 4038 // CHECK3-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 4039 // CHECK3-NEXT: store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8 4040 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 4041 // CHECK3: omp.inner.for.cond: 4042 // CHECK3-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30 4043 // CHECK3-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !30 4044 // CHECK3-NEXT: [[CMP4:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]] 4045 // CHECK3-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 4046 // CHECK3: omp.inner.for.body: 4047 // CHECK3-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30 4048 // CHECK3-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400 4049 // CHECK3-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] 4050 // CHECK3-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !30 4051 // CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4, !llvm.access.group !30 4052 // CHECK3-NEXT: [[CONV5:%.*]] = sext i32 [[TMP10]] to i64 4053 // CHECK3-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30 4054 // CHECK3-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !30 4055 // CHECK3-NEXT: [[MUL6:%.*]] = mul i64 [[TMP11]], [[TMP12]] 4056 // CHECK3-NEXT: [[ADD:%.*]] = add i64 [[CONV5]], [[MUL6]] 4057 // CHECK3-NEXT: [[CONV7:%.*]] = trunc i64 [[ADD]] to i32 4058 // CHECK3-NEXT: store i32 [[CONV7]], i32* [[LIN2]], align 4, !llvm.access.group !30 4059 // CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START1]], align 4, !llvm.access.group !30 4060 // CHECK3-NEXT: [[CONV8:%.*]] = sext i32 [[TMP13]] to i64 4061 // CHECK3-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30 4062 // CHECK3-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !30 4063 // CHECK3-NEXT: [[MUL9:%.*]] = mul i64 [[TMP14]], [[TMP15]] 4064 // CHECK3-NEXT: [[ADD10:%.*]] = add i64 [[CONV8]], [[MUL9]] 4065 // CHECK3-NEXT: [[CONV11:%.*]] = trunc i64 [[ADD10]] to i32 4066 // CHECK3-NEXT: store i32 [[CONV11]], i32* [[A3]], align 4, !llvm.access.group !30 4067 // CHECK3-NEXT: [[TMP16:%.*]] = load i16, i16* [[CONV]], align 4, !llvm.access.group !30 4068 // CHECK3-NEXT: [[CONV12:%.*]] = sext i16 [[TMP16]] to i32 4069 // CHECK3-NEXT: [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1 4070 // 
CHECK3-NEXT: [[CONV14:%.*]] = trunc i32 [[ADD13]] to i16 4071 // CHECK3-NEXT: store i16 [[CONV14]], i16* [[CONV]], align 4, !llvm.access.group !30 4072 // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 4073 // CHECK3: omp.body.continue: 4074 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 4075 // CHECK3: omp.inner.for.inc: 4076 // CHECK3-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30 4077 // CHECK3-NEXT: [[ADD15:%.*]] = add i64 [[TMP17]], 1 4078 // CHECK3-NEXT: store i64 [[ADD15]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30 4079 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]] 4080 // CHECK3: omp.inner.for.end: 4081 // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 4082 // CHECK3: omp.loop.exit: 4083 // CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 4084 // CHECK3-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 4085 // CHECK3-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0 4086 // CHECK3-NEXT: br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 4087 // CHECK3: .omp.final.then: 4088 // CHECK3-NEXT: store i64 400, i64* [[IT]], align 8 4089 // CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]] 4090 // CHECK3: .omp.final.done: 4091 // CHECK3-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 4092 // CHECK3-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0 4093 // CHECK3-NEXT: br i1 [[TMP21]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]] 4094 // CHECK3: .omp.linear.pu: 4095 // CHECK3-NEXT: [[TMP22:%.*]] = load i32, i32* [[LIN2]], align 4 4096 // CHECK3-NEXT: store i32 [[TMP22]], i32* [[LIN_ADDR]], align 4 4097 // CHECK3-NEXT: [[TMP23:%.*]] = load i32, i32* [[A3]], align 4 4098 // CHECK3-NEXT: store i32 [[TMP23]], i32* [[A_ADDR]], align 4 4099 // CHECK3-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] 4100 // CHECK3: .omp.linear.pu.done: 4101 // CHECK3-NEXT: ret void 4102 // 4103 // 4104 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116 4105 // CHECK3-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]]) #[[ATTR2]] { 4106 // CHECK3-NEXT: entry: 4107 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 4108 // CHECK3-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 4109 // CHECK3-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 4110 // CHECK3-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 4111 // CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 4112 // CHECK3-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 4113 // CHECK3-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* 4114 // CHECK3-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4 4115 // CHECK3-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4 4116 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4 4117 // CHECK3-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV]], align 4 4118 // CHECK3-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* 4119 // CHECK3-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2 4120 // CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4 4121 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]]) 4122 // CHECK3-NEXT: ret void 4123 // 4124 // 4125 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..4 4126 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]]) #[[ATTR3]] { 4127 // CHECK3-NEXT: entry: 4128 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 4129 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 4130 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 4131 // CHECK3-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 4132 // CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 4133 // CHECK3-NEXT: [[TMP:%.*]] = alloca i16, align 2 4134 // CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 4135 // CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 4136 // CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 4137 // CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 4138 // CHECK3-NEXT: [[IT:%.*]] = alloca i16, align 2 4139 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 4140 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 4141 // CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 4142 // CHECK3-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 4143 // CHECK3-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* 4144 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 4145 // CHECK3-NEXT: store i32 3, i32* [[DOTOMP_UB]], align 4 4146 // CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 4147 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 4148 // CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 4149 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 4150 // CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 4151 // CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 4152 // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3 4153 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 4154 // CHECK3: cond.true: 4155 // CHECK3-NEXT: br label [[COND_END:%.*]] 4156 // CHECK3: cond.false: 4157 // CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 4158 // CHECK3-NEXT: br label [[COND_END]] 4159 // CHECK3: cond.end: 4160 // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 4161 // CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 4162 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 4163 // CHECK3-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 4164 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 4165 // CHECK3: omp.inner.for.cond: 4166 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33 4167 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !33 4168 // CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 4169 // CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 4170 // CHECK3: omp.inner.for.body: 4171 // CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33 4172 // CHECK3-NEXT: [[MUL:%.*]] = mul nsw 
i32 [[TMP7]], 4 4173 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]] 4174 // CHECK3-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD]] to i16 4175 // CHECK3-NEXT: store i16 [[CONV2]], i16* [[IT]], align 2, !llvm.access.group !33 4176 // CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !33 4177 // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1 4178 // CHECK3-NEXT: store i32 [[ADD3]], i32* [[A_ADDR]], align 4, !llvm.access.group !33 4179 // CHECK3-NEXT: [[TMP9:%.*]] = load i16, i16* [[CONV]], align 4, !llvm.access.group !33 4180 // CHECK3-NEXT: [[CONV4:%.*]] = sext i16 [[TMP9]] to i32 4181 // CHECK3-NEXT: [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1 4182 // CHECK3-NEXT: [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16 4183 // CHECK3-NEXT: store i16 [[CONV6]], i16* [[CONV]], align 4, !llvm.access.group !33 4184 // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 4185 // CHECK3: omp.body.continue: 4186 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 4187 // CHECK3: omp.inner.for.inc: 4188 // CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33 4189 // CHECK3-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP10]], 1 4190 // CHECK3-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33 4191 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]] 4192 // CHECK3: omp.inner.for.end: 4193 // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 4194 // CHECK3: omp.loop.exit: 4195 // CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 4196 // CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 4197 // CHECK3-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 4198 // CHECK3-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 4199 // CHECK3: .omp.final.then: 4200 // CHECK3-NEXT: store i16 22, i16* [[IT]], align 2 4201 // CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]] 4202 // CHECK3: .omp.final.done: 4203 // CHECK3-NEXT: ret void 4204 // 4205 // 4206 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140 4207 // CHECK3-SAME: (i32 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], double* nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 4 dereferenceable(12) [[D:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { 4208 // CHECK3-NEXT: entry: 4209 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 4210 // CHECK3-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4 4211 // CHECK3-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 4212 // CHECK3-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4 4213 // CHECK3-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4 4214 // CHECK3-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4 4215 // CHECK3-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4 4216 // CHECK3-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4 4217 // CHECK3-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4 4218 // CHECK3-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 4219 // CHECK3-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 4220 // CHECK3-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 4221 // CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 4222 // CHECK3-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4 4223 // CHECK3-NEXT: store i32 
[[VLA]], i32* [[VLA_ADDR]], align 4 4224 // CHECK3-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4 4225 // CHECK3-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4 4226 // CHECK3-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4 4227 // CHECK3-NEXT: store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4 4228 // CHECK3-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4 4229 // CHECK3-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4 4230 // CHECK3-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 4231 // CHECK3-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4 4232 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 4233 // CHECK3-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4 4234 // CHECK3-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4 4235 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4 4236 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4 4237 // CHECK3-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4 4238 // CHECK3-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4 4239 // CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4 4240 // CHECK3-NEXT: store i32 [[TMP8]], i32* [[A_CASTED]], align 4 4241 // CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_CASTED]], align 4 4242 // CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 4243 // CHECK3-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 4244 // CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 4245 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x float]*, i32, float*, [5 x [10 x double]]*, i32, i32, double*, %struct.TT*, i32)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP9]], [10 x float]* [[TMP0]], i32 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i32 [[TMP11]]) 4246 // CHECK3-NEXT: ret void 4247 // 4248 // 4249 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..7 4250 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], double* nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 4 dereferenceable(12) [[D:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] { 4251 // CHECK3-NEXT: entry: 4252 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 4253 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 4254 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 4255 // CHECK3-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4 4256 // CHECK3-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 4257 // CHECK3-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4 4258 // CHECK3-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4 4259 // CHECK3-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4 4260 // CHECK3-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4 4261 // CHECK3-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4 4262 // CHECK3-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4 4263 // CHECK3-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 4264 // CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 4265 // CHECK3-NEXT: [[TMP:%.*]] = alloca i8, align 1 4266 // CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 4267 // CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 4268 // CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 4269 // CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 4270 // CHECK3-NEXT: [[IT:%.*]] = alloca i8, align 1 4271 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 4272 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 4273 // CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 4274 // CHECK3-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4 4275 // CHECK3-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 4276 // CHECK3-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4 4277 // CHECK3-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4 4278 // CHECK3-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4 4279 // CHECK3-NEXT: store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4 4280 // CHECK3-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4 4281 // CHECK3-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4 4282 // CHECK3-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 4283 // CHECK3-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4 4284 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 4285 // CHECK3-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4 4286 // CHECK3-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** 
[[C_ADDR]], align 4 4287 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4 4288 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4 4289 // CHECK3-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4 4290 // CHECK3-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4 4291 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 4292 // CHECK3-NEXT: store i32 25, i32* [[DOTOMP_UB]], align 4 4293 // CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 4294 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 4295 // CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 4296 // CHECK3-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 4297 // CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4 4298 // CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]]) 4299 // CHECK3-NEXT: br label [[OMP_DISPATCH_COND:%.*]] 4300 // CHECK3: omp.dispatch.cond: 4301 // CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 4302 // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25 4303 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 4304 // CHECK3: cond.true: 4305 // CHECK3-NEXT: br label [[COND_END:%.*]] 4306 // CHECK3: cond.false: 4307 // CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 4308 // CHECK3-NEXT: br label [[COND_END]] 4309 // CHECK3: cond.end: 4310 // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] 4311 // CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 4312 // CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 4313 // CHECK3-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4 4314 // CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 4315 // CHECK3-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 4316 // CHECK3-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] 4317 // CHECK3-NEXT: br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] 4318 // CHECK3: omp.dispatch.body: 4319 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 4320 // CHECK3: omp.inner.for.cond: 4321 // CHECK3-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36 4322 // CHECK3-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !36 4323 // CHECK3-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]] 4324 // CHECK3-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 4325 // CHECK3: omp.inner.for.body: 4326 // CHECK3-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36 4327 // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1 4328 // CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 122, [[MUL]] 4329 // CHECK3-NEXT: [[CONV:%.*]] = trunc i32 [[SUB]] to i8 4330 // CHECK3-NEXT: store i8 [[CONV]], i8* [[IT]], align 1, !llvm.access.group !36 4331 // CHECK3-NEXT: [[TMP19:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !36 4332 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1 4333 // CHECK3-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4, !llvm.access.group !36 4334 // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i32 0, i32 2 4335 // CHECK3-NEXT: [[TMP20:%.*]] = load float, 
float* [[ARRAYIDX]], align 4, !llvm.access.group !36 4336 // CHECK3-NEXT: [[CONV7:%.*]] = fpext float [[TMP20]] to double 4337 // CHECK3-NEXT: [[ADD8:%.*]] = fadd double [[CONV7]], 1.000000e+00 4338 // CHECK3-NEXT: [[CONV9:%.*]] = fptrunc double [[ADD8]] to float 4339 // CHECK3-NEXT: store float [[CONV9]], float* [[ARRAYIDX]], align 4, !llvm.access.group !36 4340 // CHECK3-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[TMP2]], i32 3 4341 // CHECK3-NEXT: [[TMP21:%.*]] = load float, float* [[ARRAYIDX10]], align 4, !llvm.access.group !36 4342 // CHECK3-NEXT: [[CONV11:%.*]] = fpext float [[TMP21]] to double 4343 // CHECK3-NEXT: [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00 4344 // CHECK3-NEXT: [[CONV13:%.*]] = fptrunc double [[ADD12]] to float 4345 // CHECK3-NEXT: store float [[CONV13]], float* [[ARRAYIDX10]], align 4, !llvm.access.group !36 4346 // CHECK3-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i32 0, i32 1 4347 // CHECK3-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX14]], i32 0, i32 2 4348 // CHECK3-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX15]], align 8, !llvm.access.group !36 4349 // CHECK3-NEXT: [[ADD16:%.*]] = fadd double [[TMP22]], 1.000000e+00 4350 // CHECK3-NEXT: store double [[ADD16]], double* [[ARRAYIDX15]], align 8, !llvm.access.group !36 4351 // CHECK3-NEXT: [[TMP23:%.*]] = mul nsw i32 1, [[TMP5]] 4352 // CHECK3-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds double, double* [[TMP6]], i32 [[TMP23]] 4353 // CHECK3-NEXT: [[ARRAYIDX18:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX17]], i32 3 4354 // CHECK3-NEXT: [[TMP24:%.*]] = load double, double* [[ARRAYIDX18]], align 8, !llvm.access.group !36 4355 // CHECK3-NEXT: [[ADD19:%.*]] = fadd double [[TMP24]], 1.000000e+00 4356 // CHECK3-NEXT: store double [[ADD19]], double* [[ARRAYIDX18]], align 8, !llvm.access.group !36 4357 // CHECK3-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0 4358 // CHECK3-NEXT: [[TMP25:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group !36 4359 // CHECK3-NEXT: [[ADD20:%.*]] = add nsw i64 [[TMP25]], 1 4360 // CHECK3-NEXT: store i64 [[ADD20]], i64* [[X]], align 4, !llvm.access.group !36 4361 // CHECK3-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1 4362 // CHECK3-NEXT: [[TMP26:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group !36 4363 // CHECK3-NEXT: [[CONV21:%.*]] = sext i8 [[TMP26]] to i32 4364 // CHECK3-NEXT: [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1 4365 // CHECK3-NEXT: [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8 4366 // CHECK3-NEXT: store i8 [[CONV23]], i8* [[Y]], align 4, !llvm.access.group !36 4367 // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 4368 // CHECK3: omp.body.continue: 4369 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 4370 // CHECK3: omp.inner.for.inc: 4371 // CHECK3-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36 4372 // CHECK3-NEXT: [[ADD24:%.*]] = add nsw i32 [[TMP27]], 1 4373 // CHECK3-NEXT: store i32 [[ADD24]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36 4374 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP37:![0-9]+]] 4375 // CHECK3: omp.inner.for.end: 4376 // CHECK3-NEXT: br label [[OMP_DISPATCH_INC:%.*]] 4377 // CHECK3: omp.dispatch.inc: 4378 // CHECK3-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 4379 // CHECK3-NEXT: [[TMP29:%.*]] = load i32, i32* 
[[DOTOMP_STRIDE]], align 4 4380 // CHECK3-NEXT: [[ADD25:%.*]] = add nsw i32 [[TMP28]], [[TMP29]] 4381 // CHECK3-NEXT: store i32 [[ADD25]], i32* [[DOTOMP_LB]], align 4 4382 // CHECK3-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 4383 // CHECK3-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 4384 // CHECK3-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP30]], [[TMP31]] 4385 // CHECK3-NEXT: store i32 [[ADD26]], i32* [[DOTOMP_UB]], align 4 4386 // CHECK3-NEXT: br label [[OMP_DISPATCH_COND]] 4387 // CHECK3: omp.dispatch.end: 4388 // CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]]) 4389 // CHECK3-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 4390 // CHECK3-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0 4391 // CHECK3-NEXT: br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 4392 // CHECK3: .omp.final.then: 4393 // CHECK3-NEXT: store i8 96, i8* [[IT]], align 1 4394 // CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]] 4395 // CHECK3: .omp.final.done: 4396 // CHECK3-NEXT: ret void 4397 // 4398 // 4399 // CHECK3-LABEL: define {{[^@]+}}@_Z3bari 4400 // CHECK3-SAME: (i32 [[N:%.*]]) #[[ATTR0]] { 4401 // CHECK3-NEXT: entry: 4402 // CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 4403 // CHECK3-NEXT: [[A:%.*]] = alloca i32, align 4 4404 // CHECK3-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4 4405 // CHECK3-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 4406 // CHECK3-NEXT: store i32 0, i32* [[A]], align 4 4407 // CHECK3-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 4408 // CHECK3-NEXT: [[CALL:%.*]] = call i32 @_Z3fooi(i32 [[TMP0]]) 4409 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4 4410 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] 4411 // CHECK3-NEXT: store i32 [[ADD]], i32* [[A]], align 4 4412 // CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 4413 // CHECK3-NEXT: [[CALL1:%.*]] = call i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 4 dereferenceable(8) [[S]], i32 [[TMP2]]) 4414 // CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 4415 // CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] 4416 // CHECK3-NEXT: store i32 [[ADD2]], i32* [[A]], align 4 4417 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 4418 // CHECK3-NEXT: [[CALL3:%.*]] = call i32 @_ZL7fstatici(i32 [[TMP4]]) 4419 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4 4420 // CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] 4421 // CHECK3-NEXT: store i32 [[ADD4]], i32* [[A]], align 4 4422 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4 4423 // CHECK3-NEXT: [[CALL5:%.*]] = call i32 @_Z9ftemplateIiET_i(i32 [[TMP6]]) 4424 // CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4 4425 // CHECK3-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]] 4426 // CHECK3-NEXT: store i32 [[ADD6]], i32* [[A]], align 4 4427 // CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4 4428 // CHECK3-NEXT: ret i32 [[TMP8]] 4429 // 4430 // 4431 // CHECK3-LABEL: define {{[^@]+}}@_ZN2S12r1Ei 4432 // CHECK3-SAME: (%struct.S1* nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 [[N:%.*]]) #[[ATTR0]] comdat align 2 { 4433 // CHECK3-NEXT: entry: 4434 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 4435 // CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 4436 // CHECK3-NEXT: [[B:%.*]] = alloca i32, align 4 4437 // CHECK3-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4 4438 // CHECK3-NEXT: [[__VLA_EXPR0:%.*]] = 
alloca i32, align 4 4439 // CHECK3-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4 4440 // CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4 4441 // CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4 4442 // CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4 4443 // CHECK3-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4 4444 // CHECK3-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 4445 // CHECK3-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 4446 // CHECK3-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 4447 // CHECK3-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 4448 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 4449 // CHECK3-NEXT: store i32 [[ADD]], i32* [[B]], align 4 4450 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 4451 // CHECK3-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave() 4452 // CHECK3-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4 4453 // CHECK3-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]] 4454 // CHECK3-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2 4455 // CHECK3-NEXT: store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4 4456 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[B]], align 4 4457 // CHECK3-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4 4458 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4 4459 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4 4460 // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 60 4461 // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 4462 // CHECK3: omp_if.then: 4463 // CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0 4464 // CHECK3-NEXT: [[TMP7:%.*]] = mul nuw i32 2, [[TMP1]] 4465 // CHECK3-NEXT: [[TMP8:%.*]] = mul nuw i32 [[TMP7]], 2 4466 // CHECK3-NEXT: [[TMP9:%.*]] = sext i32 [[TMP8]] to i64 4467 // CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 4468 // CHECK3-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to %struct.S1** 4469 // CHECK3-NEXT: store %struct.S1* [[THIS1]], %struct.S1** [[TMP11]], align 4 4470 // CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 4471 // CHECK3-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double** 4472 // CHECK3-NEXT: store double* [[A]], double** [[TMP13]], align 4 4473 // CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 4474 // CHECK3-NEXT: store i64 8, i64* [[TMP14]], align 4 4475 // CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 4476 // CHECK3-NEXT: store i8* null, i8** [[TMP15]], align 4 4477 // CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 4478 // CHECK3-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32* 4479 // CHECK3-NEXT: store i32 [[TMP5]], i32* [[TMP17]], align 4 4480 // CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 4481 // CHECK3-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* 4482 // CHECK3-NEXT: store i32 [[TMP5]], i32* [[TMP19]], align 4 4483 // CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 4484 // CHECK3-NEXT: store i64 4, i64* [[TMP20]], align 4 4485 // CHECK3-NEXT: 
[[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 4486 // CHECK3-NEXT: store i8* null, i8** [[TMP21]], align 4 4487 // CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 4488 // CHECK3-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32* 4489 // CHECK3-NEXT: store i32 2, i32* [[TMP23]], align 4 4490 // CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 4491 // CHECK3-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32* 4492 // CHECK3-NEXT: store i32 2, i32* [[TMP25]], align 4 4493 // CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 4494 // CHECK3-NEXT: store i64 4, i64* [[TMP26]], align 4 4495 // CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 4496 // CHECK3-NEXT: store i8* null, i8** [[TMP27]], align 4 4497 // CHECK3-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 4498 // CHECK3-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32* 4499 // CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP29]], align 4 4500 // CHECK3-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 4501 // CHECK3-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i32* 4502 // CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP31]], align 4 4503 // CHECK3-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 4504 // CHECK3-NEXT: store i64 4, i64* [[TMP32]], align 4 4505 // CHECK3-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 4506 // CHECK3-NEXT: store i8* null, i8** [[TMP33]], align 4 4507 // CHECK3-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 4508 // CHECK3-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16** 4509 // CHECK3-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 4 4510 // CHECK3-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 4511 // CHECK3-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16** 4512 // CHECK3-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 4 4513 // CHECK3-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 4514 // CHECK3-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 4 4515 // CHECK3-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 4516 // CHECK3-NEXT: store i8* null, i8** [[TMP39]], align 4 4517 // CHECK3-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 4518 // CHECK3-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 4519 // CHECK3-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 4520 // CHECK3-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) 4521 // CHECK3-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 4522 // CHECK3-NEXT: br i1 [[TMP44]], label 
[[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 4523 // CHECK3: omp_offload.failed: 4524 // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR4]] 4525 // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] 4526 // CHECK3: omp_offload.cont: 4527 // CHECK3-NEXT: br label [[OMP_IF_END:%.*]] 4528 // CHECK3: omp_if.else: 4529 // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR4]] 4530 // CHECK3-NEXT: br label [[OMP_IF_END]] 4531 // CHECK3: omp_if.end: 4532 // CHECK3-NEXT: [[TMP45:%.*]] = mul nsw i32 1, [[TMP1]] 4533 // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP45]] 4534 // CHECK3-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1 4535 // CHECK3-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 4536 // CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP46]] to i32 4537 // CHECK3-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4 4538 // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP47]] 4539 // CHECK3-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 4540 // CHECK3-NEXT: call void @llvm.stackrestore(i8* [[TMP48]]) 4541 // CHECK3-NEXT: ret i32 [[ADD3]] 4542 // 4543 // 4544 // CHECK3-LABEL: define {{[^@]+}}@_ZL7fstatici 4545 // CHECK3-SAME: (i32 [[N:%.*]]) #[[ATTR0]] { 4546 // CHECK3-NEXT: entry: 4547 // CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 4548 // CHECK3-NEXT: [[A:%.*]] = alloca i32, align 4 4549 // CHECK3-NEXT: [[AA:%.*]] = alloca i16, align 2 4550 // CHECK3-NEXT: [[AAA:%.*]] = alloca i8, align 1 4551 // CHECK3-NEXT: [[B:%.*]] = alloca [10 x i32], align 4 4552 // CHECK3-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 4553 // CHECK3-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 4554 // CHECK3-NEXT: [[AAA_CASTED:%.*]] = alloca i32, align 4 4555 // CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 4 4556 // CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 4 4557 // CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 4 4558 // CHECK3-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 4559 // CHECK3-NEXT: store i32 0, i32* [[A]], align 4 4560 // CHECK3-NEXT: store i16 0, i16* [[AA]], align 2 4561 // CHECK3-NEXT: store i8 0, i8* [[AAA]], align 1 4562 // CHECK3-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4 4563 // CHECK3-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4 4564 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4 4565 // CHECK3-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2 4566 // CHECK3-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16* 4567 // CHECK3-NEXT: store i16 [[TMP2]], i16* [[CONV]], align 2 4568 // CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4 4569 // CHECK3-NEXT: [[TMP4:%.*]] = load i8, i8* [[AAA]], align 1 4570 // CHECK3-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_CASTED]] to i8* 4571 // CHECK3-NEXT: store i8 [[TMP4]], i8* [[CONV1]], align 1 4572 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[AAA_CASTED]], align 4 4573 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4 4574 // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 50 4575 // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 4576 // CHECK3: omp_if.then: 4577 // CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], 
i32 0, i32 0 4578 // CHECK3-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32* 4579 // CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP8]], align 4 4580 // CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 4581 // CHECK3-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i32* 4582 // CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP10]], align 4 4583 // CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 4584 // CHECK3-NEXT: store i8* null, i8** [[TMP11]], align 4 4585 // CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 4586 // CHECK3-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* 4587 // CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP13]], align 4 4588 // CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 4589 // CHECK3-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* 4590 // CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP15]], align 4 4591 // CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 4592 // CHECK3-NEXT: store i8* null, i8** [[TMP16]], align 4 4593 // CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 4594 // CHECK3-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32* 4595 // CHECK3-NEXT: store i32 [[TMP5]], i32* [[TMP18]], align 4 4596 // CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 4597 // CHECK3-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32* 4598 // CHECK3-NEXT: store i32 [[TMP5]], i32* [[TMP20]], align 4 4599 // CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 4600 // CHECK3-NEXT: store i8* null, i8** [[TMP21]], align 4 4601 // CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 4602 // CHECK3-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to [10 x i32]** 4603 // CHECK3-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP23]], align 4 4604 // CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 4605 // CHECK3-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to [10 x i32]** 4606 // CHECK3-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP25]], align 4 4607 // CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 4608 // CHECK3-NEXT: store i8* null, i8** [[TMP26]], align 4 4609 // CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 4610 // CHECK3-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 4611 // CHECK3-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) 4612 // CHECK3-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0 4613 // CHECK3-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 4614 // CHECK3: omp_offload.failed: 4615 // CHECK3-NEXT: 
call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]] 4616 // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] 4617 // CHECK3: omp_offload.cont: 4618 // CHECK3-NEXT: br label [[OMP_IF_END:%.*]] 4619 // CHECK3: omp_if.else: 4620 // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]] 4621 // CHECK3-NEXT: br label [[OMP_IF_END]] 4622 // CHECK3: omp_if.end: 4623 // CHECK3-NEXT: [[TMP31:%.*]] = load i32, i32* [[A]], align 4 4624 // CHECK3-NEXT: ret i32 [[TMP31]] 4625 // 4626 // 4627 // CHECK3-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i 4628 // CHECK3-SAME: (i32 [[N:%.*]]) #[[ATTR0]] comdat { 4629 // CHECK3-NEXT: entry: 4630 // CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 4631 // CHECK3-NEXT: [[A:%.*]] = alloca i32, align 4 4632 // CHECK3-NEXT: [[AA:%.*]] = alloca i16, align 2 4633 // CHECK3-NEXT: [[B:%.*]] = alloca [10 x i32], align 4 4634 // CHECK3-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 4635 // CHECK3-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 4636 // CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 4637 // CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 4638 // CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 4639 // CHECK3-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 4640 // CHECK3-NEXT: store i32 0, i32* [[A]], align 4 4641 // CHECK3-NEXT: store i16 0, i16* [[AA]], align 2 4642 // CHECK3-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4 4643 // CHECK3-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4 4644 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4 4645 // CHECK3-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2 4646 // CHECK3-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16* 4647 // CHECK3-NEXT: store i16 [[TMP2]], i16* [[CONV]], align 2 4648 // CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4 4649 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 4650 // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40 4651 // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 4652 // CHECK3: omp_if.then: 4653 // CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 4654 // CHECK3-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32* 4655 // CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP6]], align 4 4656 // CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 4657 // CHECK3-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32* 4658 // CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP8]], align 4 4659 // CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 4660 // CHECK3-NEXT: store i8* null, i8** [[TMP9]], align 4 4661 // CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 4662 // CHECK3-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i32* 4663 // CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP11]], align 4 4664 // CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 4665 // CHECK3-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* 4666 // CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP13]], align 4 4667 // CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* 
[[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 4668 // CHECK3-NEXT: store i8* null, i8** [[TMP14]], align 4 4669 // CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 4670 // CHECK3-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to [10 x i32]** 4671 // CHECK3-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP16]], align 4 4672 // CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 4673 // CHECK3-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [10 x i32]** 4674 // CHECK3-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP18]], align 4 4675 // CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 4676 // CHECK3-NEXT: store i8* null, i8** [[TMP19]], align 4 4677 // CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 4678 // CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 4679 // CHECK3-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.15, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) 4680 // CHECK3-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 4681 // CHECK3-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 4682 // CHECK3: omp_offload.failed: 4683 // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178(i32 [[TMP1]], i32 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]] 4684 // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] 4685 // CHECK3: omp_offload.cont: 4686 // CHECK3-NEXT: br label [[OMP_IF_END:%.*]] 4687 // CHECK3: omp_if.else: 4688 // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178(i32 [[TMP1]], i32 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]] 4689 // CHECK3-NEXT: br label [[OMP_IF_END]] 4690 // CHECK3: omp_if.end: 4691 // CHECK3-NEXT: [[TMP24:%.*]] = load i32, i32* [[A]], align 4 4692 // CHECK3-NEXT: ret i32 [[TMP24]] 4693 // 4694 // 4695 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216 4696 // CHECK3-SAME: (%struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] { 4697 // CHECK3-NEXT: entry: 4698 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 4699 // CHECK3-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 4700 // CHECK3-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 4701 // CHECK3-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4 4702 // CHECK3-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4 4703 // CHECK3-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4 4704 // CHECK3-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 4705 // CHECK3-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 4706 // CHECK3-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 4707 // CHECK3-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4 4708 // CHECK3-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4 4709 // CHECK3-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 4710 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 4711 // 
CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4 4712 // CHECK3-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4 4713 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4 4714 // CHECK3-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4 4715 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4 4716 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]]) 4717 // CHECK3-NEXT: ret void 4718 // 4719 // 4720 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..9 4721 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR3]] { 4722 // CHECK3-NEXT: entry: 4723 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 4724 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 4725 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 4726 // CHECK3-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 4727 // CHECK3-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 4728 // CHECK3-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4 4729 // CHECK3-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4 4730 // CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 4731 // CHECK3-NEXT: [[TMP:%.*]] = alloca i64, align 4 4732 // CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 4733 // CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 4734 // CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 4735 // CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 4736 // CHECK3-NEXT: [[IT:%.*]] = alloca i64, align 8 4737 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 4738 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 4739 // CHECK3-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 4740 // CHECK3-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 4741 // CHECK3-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 4742 // CHECK3-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4 4743 // CHECK3-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4 4744 // CHECK3-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 4745 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 4746 // CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4 4747 // CHECK3-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4 4748 // CHECK3-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 4749 // CHECK3-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8 4750 // CHECK3-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 4751 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 4752 // CHECK3-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 4753 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4 4754 // CHECK3-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) 4755 // CHECK3-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 4756 // CHECK3-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP6]], 3 4757 // 
// CHECK3: cond.true:
// CHECK3-NEXT: br label [[COND_END:%.*]]
// CHECK3: cond.false:
// CHECK3-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK3-NEXT: br label [[COND_END]]
// CHECK3: cond.end:
// CHECK3-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
// CHECK3-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK3-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK3-NEXT: store i64 [[TMP8]], i64* [[DOTOMP_IV]], align 8
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !39
// CHECK3-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !39
// CHECK3-NEXT: [[CMP3:%.*]] = icmp ule i64 [[TMP9]], [[TMP10]]
// CHECK3-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !39
// CHECK3-NEXT: [[MUL:%.*]] = mul i64 [[TMP11]], 400
// CHECK3-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK3-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !39
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[B_ADDR]], align 4, !llvm.access.group !39
// CHECK3-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP12]] to double
// CHECK3-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK3-NEXT: store double [[ADD]], double* [[A]], align 4, !llvm.access.group !39
// CHECK3-NEXT: [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK3-NEXT: [[TMP13:%.*]] = load double, double* [[A4]], align 4, !llvm.access.group !39
// CHECK3-NEXT: [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00
// CHECK3-NEXT: store double [[INC]], double* [[A4]], align 4, !llvm.access.group !39
// CHECK3-NEXT: [[CONV5:%.*]] = fptosi double [[INC]] to i16
// CHECK3-NEXT: [[TMP14:%.*]] = mul nsw i32 1, [[TMP2]]
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP14]]
// CHECK3-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
// CHECK3-NEXT: store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2, !llvm.access.group !39
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK3: omp.body.continue:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !39
// CHECK3-NEXT: [[ADD7:%.*]] = add i64 [[TMP15]], 1
// CHECK3-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !39
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP40:![0-9]+]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK3: omp.loop.exit:
// CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
// CHECK3-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
// CHECK3-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK3: .omp.final.then:
// CHECK3-NEXT: store i64 400, i64* [[IT]], align 8
// CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK3: .omp.final.done:
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195
// CHECK3-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK3-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AAA_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK3-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK3-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
// CHECK3-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK3-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK3-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
// CHECK3-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK3-NEXT: [[CONV2:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK3-NEXT: store i16 [[TMP3]], i16* [[CONV2]], align 2
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV1]], align 4
// CHECK3-NEXT: [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8*
// CHECK3-NEXT: store i8 [[TMP5]], i8* [[CONV3]], align 1
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..11
// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK3-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK3-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
// CHECK3-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK3-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK3-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
// CHECK3-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178
// CHECK3-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK3-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK3-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK3-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK3-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK3-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK3-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK3-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..14
// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[TMP:%.*]] = alloca i64, align 4
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[I:%.*]] = alloca i64, align 8
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK3-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK3-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK3-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK3-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK3-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK3-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8
// CHECK3-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK3-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK3-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK3: cond.true:
// CHECK3-NEXT: br label [[COND_END:%.*]]
// CHECK3: cond.false:
// CHECK3-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK3-NEXT: br label [[COND_END]]
// CHECK3: cond.end:
// CHECK3-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK3-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK3-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK3-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !42
// CHECK3-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !42
// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]]
// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !42
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
// CHECK3-NEXT: store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !42
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !42
// CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK3-NEXT: store i32 [[ADD2]], i32* [[A_ADDR]], align 4, !llvm.access.group !42
// CHECK3-NEXT: [[TMP10:%.*]] = load i16, i16* [[CONV]], align 4, !llvm.access.group !42
// CHECK3-NEXT: [[CONV3:%.*]] = sext i16 [[TMP10]] to i32
// CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
// CHECK3-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16
// CHECK3-NEXT: store i16 [[CONV5]], i16* [[CONV]], align 4, !llvm.access.group !42
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !42
// CHECK3-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK3-NEXT: store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !42
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK3: omp.body.continue:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !42
// CHECK3-NEXT: [[ADD7:%.*]] = add nsw i64 [[TMP12]], 1
// CHECK3-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !42
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP43:![0-9]+]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK3: omp.loop.exit:
// CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
// CHECK3-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK3: .omp.final.then:
// CHECK3-NEXT: store i64 11, i64* [[I]], align 8
// CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK3: .omp.final.done:
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK3-SAME: () #[[ATTR7:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK3-NEXT: ret void
//
//
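// NOTE: the CHECK4 assertions below re-verify the same host-side IR as
// CHECK3, but for the i386 run that compiles through a PCH (see the
// corresponding pair of RUN lines at the top of this file).
//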
// CHECK4-LABEL: define {{[^@]+}}@_Z7get_valv
// CHECK4-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: ret i64 0
//
//
// CHECK4-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK4-SAME: (i32 [[N:%.*]]) #[[ATTR0]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK4-NEXT: [[B:%.*]] = alloca [10 x float], align 4
// CHECK4-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4
// CHECK4-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK4-NEXT: [[__VLA_EXPR1:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4
// CHECK4-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
// CHECK4-NEXT: [[K:%.*]] = alloca i64, align 8
// CHECK4-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[LIN:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[LIN_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[A_CASTED2:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4
// CHECK4-NEXT: [[A_CASTED3:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[AA_CASTED4:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOFFLOAD_BASEPTRS6:%.*]] = alloca [2 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_PTRS7:%.*]] = alloca [2 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_MAPPERS8:%.*]] = alloca [2 x i8*], align 4
// CHECK4-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[A_CASTED11:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOFFLOAD_BASEPTRS14:%.*]] = alloca [10 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_PTRS15:%.*]] = alloca [10 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_MAPPERS16:%.*]] = alloca [10 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
// CHECK4-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK4-NEXT: store i32 0, i32* [[A]], align 4
// CHECK4-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK4-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
// CHECK4-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP1]], align 4
// CHECK4-NEXT: store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
// CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK4-NEXT: [[TMP4:%.*]] = mul nuw i32 5, [[TMP3]]
// CHECK4-NEXT: [[VLA1:%.*]] = alloca double, i32 [[TMP4]], align 8
// CHECK4-NEXT: store i32 [[TMP3]], i32* [[__VLA_EXPR1]], align 4
// CHECK4-NEXT: [[TMP5:%.*]] = call i8* @__kmpc_omp_target_task_alloc(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i32 20, i32 1, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*), i64 -1)
// CHECK4-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP5]] to %struct.kmp_task_t_with_privates*
// CHECK4-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP6]], i32 0, i32 0
// CHECK4-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i8* [[TMP5]])
// CHECK4-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv()
// CHECK4-NEXT: store i64 [[CALL]], i64* [[K]], align 8
// CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT: store i32 [[TMP9]], i32* [[A_CASTED]], align 4
// CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101(i32 [[TMP10]], i64* [[K]]) #[[ATTR4:[0-9]+]]
// CHECK4-NEXT: store i32 12, i32* [[LIN]], align 4
// CHECK4-NEXT: [[TMP11:%.*]] = load i16, i16* [[AA]], align 2
// CHECK4-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK4-NEXT: store i16 [[TMP11]], i16* [[CONV]], align 2
// CHECK4-NEXT: [[TMP12:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK4-NEXT: [[TMP13:%.*]] = load i32, i32* [[LIN]], align 4
// CHECK4-NEXT: store i32 [[TMP13]], i32* [[LIN_CASTED]], align 4
// CHECK4-NEXT: [[TMP14:%.*]] = load i32, i32* [[LIN_CASTED]], align 4
// CHECK4-NEXT: [[TMP15:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT: store i32 [[TMP15]], i32* [[A_CASTED2]], align 4
// CHECK4-NEXT: [[TMP16:%.*]] = load i32, i32* [[A_CASTED2]], align 4
// CHECK4-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32*
// CHECK4-NEXT: store i32 [[TMP12]], i32* [[TMP18]], align 4
// CHECK4-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32*
// CHECK4-NEXT: store i32 [[TMP12]], i32* [[TMP20]], align 4
// CHECK4-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK4-NEXT: store i8* null, i8** [[TMP21]], align 4
// CHECK4-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK4-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32*
// CHECK4-NEXT: store i32 [[TMP14]], i32* [[TMP23]], align 4
// CHECK4-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK4-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32*
// CHECK4-NEXT: store i32 [[TMP14]], i32* [[TMP25]], align 4
// CHECK4-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK4-NEXT: store i8* null, i8** [[TMP26]], align 4
// CHECK4-NEXT: [[TMP27:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK4-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32*
// CHECK4-NEXT: store i32 [[TMP16]], i32* [[TMP28]], align 4
// CHECK4-NEXT: [[TMP29:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK4-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i32*
// CHECK4-NEXT: store i32 [[TMP16]], i32* [[TMP30]], align 4
// CHECK4-NEXT: [[TMP31:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
// CHECK4-NEXT: store i8* null, i8** [[TMP31]], align 4
// CHECK4-NEXT: [[TMP32:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP34:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108.region_id, i32 3, i8** [[TMP32]], i8** [[TMP33]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK4-NEXT: [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
// CHECK4-NEXT: br i1 [[TMP35]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK4: omp_offload.failed:
// CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108(i32 [[TMP12]], i32 [[TMP14]], i32 [[TMP16]]) #[[ATTR4]]
// CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK4: omp_offload.cont:
// CHECK4-NEXT: [[TMP36:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT: store i32 [[TMP36]], i32* [[A_CASTED3]], align 4
// CHECK4-NEXT: [[TMP37:%.*]] = load i32, i32* [[A_CASTED3]], align 4
// CHECK4-NEXT: [[TMP38:%.*]] = load i16, i16* [[AA]], align 2
// CHECK4-NEXT: [[CONV5:%.*]] = bitcast i32* [[AA_CASTED4]] to i16*
// CHECK4-NEXT: store i16 [[TMP38]], i16* [[CONV5]], align 2
// CHECK4-NEXT: [[TMP39:%.*]] = load i32, i32* [[AA_CASTED4]], align 4
// CHECK4-NEXT: [[TMP40:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP40]], 10
// CHECK4-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK4: omp_if.then:
// CHECK4-NEXT: [[TMP41:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 0
// CHECK4-NEXT: [[TMP42:%.*]] = bitcast i8** [[TMP41]] to i32*
// CHECK4-NEXT: store i32 [[TMP37]], i32* [[TMP42]], align 4
// CHECK4-NEXT: [[TMP43:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS7]], i32 0, i32 0
// CHECK4-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32*
// CHECK4-NEXT: store i32 [[TMP37]], i32* [[TMP44]], align 4
// CHECK4-NEXT: [[TMP45:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS8]], i32 0, i32 0
// CHECK4-NEXT: store i8* null, i8** [[TMP45]], align 4
// CHECK4-NEXT: [[TMP46:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 1
// CHECK4-NEXT: [[TMP47:%.*]] = bitcast i8** [[TMP46]] to i32*
// CHECK4-NEXT: store i32 [[TMP39]], i32* [[TMP47]], align 4
// CHECK4-NEXT: [[TMP48:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS7]], i32 0, i32 1
// CHECK4-NEXT: [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32*
// CHECK4-NEXT: store i32 [[TMP39]], i32* [[TMP49]], align 4
// CHECK4-NEXT: [[TMP50:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS8]], i32 0, i32 1
// CHECK4-NEXT: store i8* null, i8** [[TMP50]], align 4
// CHECK4-NEXT: [[TMP51:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 0
// CHECK4-NEXT: [[TMP52:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS7]], i32 0, i32 0
// CHECK4-NEXT: [[TMP53:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116.region_id, i32 2, i8** [[TMP51]], i8** [[TMP52]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK4-NEXT: [[TMP54:%.*]] = icmp ne i32 [[TMP53]], 0
// CHECK4-NEXT: br i1 [[TMP54]], label [[OMP_OFFLOAD_FAILED9:%.*]], label [[OMP_OFFLOAD_CONT10:%.*]]
// CHECK4: omp_offload.failed9:
// CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116(i32 [[TMP37]], i32 [[TMP39]]) #[[ATTR4]]
// CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT10]]
// CHECK4: omp_offload.cont10:
// CHECK4-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK4: omp_if.else:
// CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116(i32 [[TMP37]], i32 [[TMP39]]) #[[ATTR4]]
// CHECK4-NEXT: br label [[OMP_IF_END]]
// CHECK4: omp_if.end:
// CHECK4-NEXT: [[TMP55:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT: store i32 [[TMP55]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK4-NEXT: [[TMP56:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT: store i32 [[TMP56]], i32* [[A_CASTED11]], align 4
// CHECK4-NEXT: [[TMP57:%.*]] = load i32, i32* [[A_CASTED11]], align 4
// CHECK4-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK4-NEXT: store i32 [[TMP58]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK4-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK4-NEXT: [[TMP60:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK4-NEXT: [[CMP12:%.*]] = icmp sgt i32 [[TMP60]], 20
// CHECK4-NEXT: br i1 [[CMP12]], label [[OMP_IF_THEN13:%.*]], label [[OMP_IF_ELSE19:%.*]]
// CHECK4: omp_if.then13:
// CHECK4-NEXT: [[TMP61:%.*]] = mul nuw i32 [[TMP1]], 4
// CHECK4-NEXT: [[TMP62:%.*]] = sext i32 [[TMP61]] to i64
// CHECK4-NEXT: [[TMP63:%.*]] = mul nuw i32 5, [[TMP3]]
// CHECK4-NEXT: [[TMP64:%.*]] = mul nuw i32 [[TMP63]], 8
// CHECK4-NEXT: [[TMP65:%.*]] = sext i32 [[TMP64]] to i64
// CHECK4-NEXT: [[TMP66:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 0
// CHECK4-NEXT: [[TMP67:%.*]] = bitcast i8** [[TMP66]] to i32*
// CHECK4-NEXT: store i32 [[TMP57]], i32* [[TMP67]], align 4
// CHECK4-NEXT: [[TMP68:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0
// CHECK4-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i32*
// CHECK4-NEXT: store i32 [[TMP57]], i32* [[TMP69]], align 4
// CHECK4-NEXT: [[TMP70:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK4-NEXT: store i64 4, i64* [[TMP70]], align 4
// CHECK4-NEXT: [[TMP71:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 0
// CHECK4-NEXT: store i8* null, i8** [[TMP71]], align 4
// CHECK4-NEXT: [[TMP72:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 1
// CHECK4-NEXT: [[TMP73:%.*]] = bitcast i8** [[TMP72]] to [10 x float]**
// CHECK4-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP73]], align 4
// CHECK4-NEXT: [[TMP74:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 1
// CHECK4-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to [10 x float]**
// CHECK4-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP75]], align 4
// CHECK4-NEXT: [[TMP76:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
// CHECK4-NEXT: store i64 40, i64* [[TMP76]], align 4
// CHECK4-NEXT: [[TMP77:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 1
// CHECK4-NEXT: store i8* null, i8** [[TMP77]], align 4
// CHECK4-NEXT: [[TMP78:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 2
// CHECK4-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32*
// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP79]], align 4
// CHECK4-NEXT: [[TMP80:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 2
// CHECK4-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i32*
// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP81]], align 4
// CHECK4-NEXT: [[TMP82:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
// CHECK4-NEXT: store i64 4, i64* [[TMP82]], align 4
// CHECK4-NEXT: [[TMP83:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 2
// CHECK4-NEXT: store i8* null, i8** [[TMP83]], align 4
// CHECK4-NEXT: [[TMP84:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 3
// CHECK4-NEXT: [[TMP85:%.*]] = bitcast i8** [[TMP84]] to float**
// CHECK4-NEXT: store float* [[VLA]], float** [[TMP85]], align 4
// CHECK4-NEXT: [[TMP86:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 3
// CHECK4-NEXT: [[TMP87:%.*]] = bitcast i8** [[TMP86]] to float**
// CHECK4-NEXT: store float* [[VLA]], float** [[TMP87]], align 4
// CHECK4-NEXT: [[TMP88:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
// CHECK4-NEXT: store i64 [[TMP62]], i64* [[TMP88]], align 4
// CHECK4-NEXT: [[TMP89:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 3
// CHECK4-NEXT: store i8* null, i8** [[TMP89]], align 4
// CHECK4-NEXT: [[TMP90:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 4
// CHECK4-NEXT: [[TMP91:%.*]] = bitcast i8** [[TMP90]] to [5 x [10 x double]]**
// CHECK4-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP91]], align 4
// CHECK4-NEXT: [[TMP92:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 4
// CHECK4-NEXT: [[TMP93:%.*]] = bitcast i8** [[TMP92]] to [5 x [10 x double]]**
// CHECK4-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP93]], align 4
// CHECK4-NEXT: [[TMP94:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
// CHECK4-NEXT: store i64 400, i64* [[TMP94]], align 4
// CHECK4-NEXT: [[TMP95:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 4
// CHECK4-NEXT: store i8* null, i8** [[TMP95]], align 4
// CHECK4-NEXT: [[TMP96:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 5
// CHECK4-NEXT: [[TMP97:%.*]] = bitcast i8** [[TMP96]] to i32*
// CHECK4-NEXT: store i32 5, i32* [[TMP97]], align 4
// CHECK4-NEXT: [[TMP98:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 5
// CHECK4-NEXT: [[TMP99:%.*]] = bitcast i8** [[TMP98]] to i32*
// CHECK4-NEXT: store i32 5, i32* [[TMP99]], align 4
// CHECK4-NEXT: [[TMP100:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5
// CHECK4-NEXT: store i64 4, i64* [[TMP100]], align 4
// CHECK4-NEXT: [[TMP101:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 5
// CHECK4-NEXT: store i8* null, i8** [[TMP101]], align 4
// CHECK4-NEXT: [[TMP102:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 6
// CHECK4-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to i32*
// CHECK4-NEXT: store i32 [[TMP3]], i32* [[TMP103]], align 4
// CHECK4-NEXT: [[TMP104:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 6
// CHECK4-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to i32*
// CHECK4-NEXT: store i32 [[TMP3]], i32* [[TMP105]], align 4
// CHECK4-NEXT: [[TMP106:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6
// CHECK4-NEXT: store i64 4, i64* [[TMP106]], align 4
// CHECK4-NEXT: [[TMP107:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 6
// CHECK4-NEXT: store i8* null, i8** [[TMP107]], align 4
// CHECK4-NEXT: [[TMP108:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 7
// CHECK4-NEXT: [[TMP109:%.*]] = bitcast i8** [[TMP108]] to double**
// CHECK4-NEXT: store double* [[VLA1]], double** [[TMP109]], align 4
// CHECK4-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 7
// CHECK4-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to double**
// CHECK4-NEXT: store double* [[VLA1]], double** [[TMP111]], align 4
// CHECK4-NEXT: [[TMP112:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7
// CHECK4-NEXT: store i64 [[TMP65]], i64* [[TMP112]], align 4
// CHECK4-NEXT: [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 7
// CHECK4-NEXT: store i8* null, i8** [[TMP113]], align 4
// CHECK4-NEXT: [[TMP114:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 8
// CHECK4-NEXT: [[TMP115:%.*]] = bitcast i8** [[TMP114]] to %struct.TT**
// CHECK4-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP115]], align 4
// CHECK4-NEXT: [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 8
// CHECK4-NEXT: [[TMP117:%.*]] = bitcast i8** [[TMP116]] to %struct.TT**
// CHECK4-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP117]], align 4
// CHECK4-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8
// CHECK4-NEXT: store i64 12, i64* [[TMP118]], align 4
// CHECK4-NEXT: [[TMP119:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 8
// CHECK4-NEXT: store i8* null, i8** [[TMP119]], align 4
// CHECK4-NEXT: [[TMP120:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 9
// CHECK4-NEXT: [[TMP121:%.*]] = bitcast i8** [[TMP120]] to i32*
// CHECK4-NEXT: store i32 [[TMP59]], i32* [[TMP121]], align 4
// CHECK4-NEXT: [[TMP122:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 9
// CHECK4-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to i32*
// CHECK4-NEXT: store i32 [[TMP59]], i32* [[TMP123]], align 4
// CHECK4-NEXT: [[TMP124:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 9
// CHECK4-NEXT: store i64 4, i64* [[TMP124]], align 4
// CHECK4-NEXT: [[TMP125:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 9
// CHECK4-NEXT: store i8* null, i8** [[TMP125]], align 4
// CHECK4-NEXT: [[TMP126:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 0
// CHECK4-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0
// CHECK4-NEXT: [[TMP128:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK4-NEXT: [[TMP129:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140.region_id, i32 10, i8** [[TMP126]], i8** [[TMP127]], i64* [[TMP128]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK4-NEXT: [[TMP130:%.*]] = icmp ne i32 [[TMP129]], 0
// CHECK4-NEXT: br i1 [[TMP130]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]]
// CHECK4: omp_offload.failed17:
// CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i32 [[TMP57]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP59]]) #[[ATTR4]]
// CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT18]]
// CHECK4: omp_offload.cont18:
// CHECK4-NEXT: br label [[OMP_IF_END20:%.*]]
// CHECK4: omp_if.else19:
// CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i32 [[TMP57]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP59]]) #[[ATTR4]]
// CHECK4-NEXT: br label [[OMP_IF_END20]]
// CHECK4: omp_if.end20:
// CHECK4-NEXT: [[TMP131:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT: [[TMP132:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK4-NEXT: call void @llvm.stackrestore(i8* [[TMP132]])
// CHECK4-NEXT: ret i32 [[TMP131]]
//
//
// CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96
// CHECK4-SAME: () #[[ATTR2:[0-9]+]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK4-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5
// CHECK4-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK4: cond.true:
// CHECK4-NEXT: br label [[COND_END:%.*]]
// CHECK4: cond.false:
// CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: br label [[COND_END]]
// CHECK4: cond.end:
// CHECK4-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK4-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK4: omp.inner.for.cond:
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !11
// CHECK4-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK4-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK4: omp.inner.for.body:
// CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK4-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]]
// CHECK4-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !11
// CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK4: omp.body.continue:
// CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK4: omp.inner.for.inc:
// CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK4-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK4-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]]
// CHECK4: omp.inner.for.end:
// CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK4: omp.loop.exit:
// CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
// CHECK4-NEXT: br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK4: .omp.final.then:
// CHECK4-NEXT: store i32 33, i32* [[I]], align 4
// CHECK4-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK4: .omp.final.done:
// CHECK4-NEXT: ret void
//
//
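// NOTE: the task entry below is the proxy-task body generated for the
// 'nowait' target region at source line 96: it unpacks the captured context
// from the kmp_task_t, issues __tgt_target_teams_nowait_mapper, and falls
// back to calling the host version of the region if offloading fails.
//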
// CHECK4-LABEL: define {{[^@]+}}@.omp_task_entry.
// CHECK4-SAME: (i32 [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias [[TMP1:%.*]]) #[[ATTR5:[0-9]+]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 4
// CHECK4-NEXT: [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 4
// CHECK4-NEXT: [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 4
// CHECK4-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 4
// CHECK4-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 4
// CHECK4-NEXT: store i32 [[TMP0]], i32* [[DOTADDR]], align 4
// CHECK4-NEXT: store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
// CHECK4-NEXT: [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 4
// CHECK4-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0
// CHECK4-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
// CHECK4-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
// CHECK4-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
// CHECK4-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
// CHECK4-NEXT: [[TMP9:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META17:![0-9]+]])
// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META20:![0-9]+]])
// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META22:![0-9]+]])
// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META24:![0-9]+]])
// CHECK4-NEXT: store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !26
// CHECK4-NEXT: store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 4, !noalias !26
// CHECK4-NEXT: store i8* null, i8** [[DOTPRIVATES__ADDR_I]], align 4, !noalias !26
// CHECK4-NEXT: store void (i8*, ...)* null, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !26
// CHECK4-NEXT: store i8* [[TMP9]], i8** [[DOTTASK_T__ADDR_I]], align 4, !noalias !26
// CHECK4-NEXT: store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 4, !noalias !26
// CHECK4-NEXT: [[TMP10:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 4, !noalias !26
// CHECK4-NEXT: [[TMP11:%.*]] = call i32 @__tgt_target_teams_nowait_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 1, i32 0, i32 0, i8* null, i32 0, i8* null) #[[ATTR4]]
// CHECK4-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
// CHECK4-NEXT: br i1 [[TMP12]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]]
// CHECK4: omp_offload.failed.i:
// CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96() #[[ATTR4]]
// CHECK4-NEXT: br label [[DOTOMP_OUTLINED__1_EXIT]]
// CHECK4: .omp_outlined..1.exit:
// CHECK4-NEXT: ret i32 0
//
//
// CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101
// CHECK4-SAME: (i32 [[A:%.*]], i64* nonnull align 4 dereferenceable(8) [[K:%.*]]) #[[ATTR3]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[K_ADDR:%.*]] = alloca i64*, align 4
// CHECK4-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK4-NEXT: store i64* [[K]], i64** [[K_ADDR]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load i64*, i64** [[K_ADDR]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK4-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i64*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP2]], i64* [[TMP0]])
// CHECK4-NEXT: ret void
//
//
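// NOTE: the outlined function below carries the loop with the linear 'k'
// variable: the start value is captured into [[DOTLINEAR_START]], the loop is
// driven by __kmpc_dispatch_init_4/__kmpc_dispatch_next_4 (dynamic schedule,
// id 35), and the last iteration writes 'k' back in the .omp.linear.pu block.
//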
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i64* nonnull align 4 dereferenceable(8) [[K:%.*]]) #[[ATTR3]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[K_ADDR:%.*]] = alloca i64*, align 4
// CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTLINEAR_START:%.*]] = alloca i64, align 8
// CHECK4-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[K1:%.*]] = alloca i64, align 8
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK4-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK4-NEXT: store i64* [[K]], i64** [[K_ADDR]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load i64*, i64** [[K_ADDR]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i64, i64* [[TMP0]], align 8
// CHECK4-NEXT: store i64 [[TMP1]], i64* [[DOTLINEAR_START]], align 8
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 8, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK4-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP3]])
// CHECK4-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 35, i32 0, i32 8, i32 1, i32 1)
// CHECK4-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK4: omp.dispatch.cond:
// CHECK4-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK4-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP4]], 0
// CHECK4-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK4: omp.dispatch.body:
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK4: omp.inner.for.cond:
// CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
// CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !27
// CHECK4-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK4-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK4: omp.inner.for.body:
// CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
// CHECK4-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
// CHECK4-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[MUL]]
// CHECK4-NEXT: store i32 [[SUB]], i32* [[I]], align 4, !llvm.access.group !27
// CHECK4-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !27
// CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
// CHECK4-NEXT: [[MUL2:%.*]] = mul nsw i32 [[TMP10]], 3
// CHECK4-NEXT: [[CONV:%.*]] = sext i32 [[MUL2]] to i64
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP9]], [[CONV]]
// CHECK4-NEXT: store i64 [[ADD]], i64* [[K1]], align 8, !llvm.access.group !27
// CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !27
// CHECK4-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK4-NEXT: store i32 [[ADD3]], i32* [[A_ADDR]], align 4, !llvm.access.group !27
// CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK4: omp.body.continue:
// CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK4: omp.inner.for.inc:
// CHECK4-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
// CHECK4-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK4-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]]
// CHECK4: omp.inner.for.end:
// CHECK4-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK4: omp.dispatch.inc:
// CHECK4-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK4: omp.dispatch.end:
// CHECK4-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
// CHECK4-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK4: .omp.final.then:
// CHECK4-NEXT: store i32 1, i32* [[I]], align 4
// CHECK4-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK4: .omp.final.done:
// CHECK4-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0
// CHECK4-NEXT: br i1 [[TMP16]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
// CHECK4: .omp.linear.pu:
// CHECK4-NEXT: [[TMP17:%.*]] = load i64, i64* [[K1]], align 8
// CHECK4-NEXT: store i64 [[TMP17]], i64* [[TMP0]], align 8
// CHECK4-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]]
// CHECK4: .omp.linear.pu.done:
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108
// CHECK4-SAME: (i32 [[AA:%.*]], i32 [[LIN:%.*]], i32 [[A:%.*]]) #[[ATTR2]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[LIN_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[LIN_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK4-NEXT: store i32 [[LIN]], i32* [[LIN_ADDR]], align 4
// CHECK4-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK4-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK4-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK4-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK4-NEXT: store i16 [[TMP0]], i16* [[CONV1]], align 2
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[LIN_ADDR]], align 4
// CHECK4-NEXT: store i32 [[TMP2]], i32* [[LIN_CASTED]], align 4
// CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[LIN_CASTED]], align 4
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK4-NEXT: store i32 [[TMP4]], i32* [[A_CASTED]], align 4
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]])
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[AA:%.*]], i32 [[LIN:%.*]], i32 [[A:%.*]]) #[[ATTR3]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[LIN_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK4-NEXT: [[TMP:%.*]] = alloca i64, align 4
// CHECK4-NEXT: [[DOTLINEAR_START:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTLINEAR_START1:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
// CHECK4-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK4-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK4-NEXT: [[LIN2:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[A3:%.*]] = alloca i32, align 4
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK4-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK4-NEXT: store i32 [[LIN]], i32* [[LIN_ADDR]], align 4
// CHECK4-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK4-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK4-NEXT: [[TMP0:%.*]] = load i32, i32* [[LIN_ADDR]], align 4
// CHECK4-NEXT: store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK4-NEXT: store i32 [[TMP1]], i32* [[DOTLINEAR_START1]], align 4
// CHECK4-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv()
// CHECK4-NEXT: store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8
// CHECK4-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK4-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK4-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK4-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3]], i32 [[TMP3]])
// CHECK4-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK4-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK4-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3
// CHECK4-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK4: cond.true:
// CHECK4-NEXT: br label [[COND_END:%.*]]
// CHECK4: cond.false:
// CHECK4-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK4-NEXT: br label [[COND_END]]
// CHECK4: cond.end:
// CHECK4-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK4-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK4-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK4-NEXT: store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK4: omp.inner.for.cond:
// CHECK4-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30
// CHECK4-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !30
// CHECK4-NEXT: [[CMP4:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]]
// CHECK4-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK4: omp.inner.for.body:
// CHECK4-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30
// CHECK4-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400
// CHECK4-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK4-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !30
// CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4, !llvm.access.group !30
// CHECK4-NEXT: [[CONV5:%.*]] = sext i32 [[TMP10]] to i64
// CHECK4-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30
// CHECK4-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !30
// CHECK4-NEXT: [[MUL6:%.*]] = mul i64 [[TMP11]], [[TMP12]]
// CHECK4-NEXT: [[ADD:%.*]] = add i64 [[CONV5]], [[MUL6]]
// CHECK4-NEXT: [[CONV7:%.*]] = trunc i64 [[ADD]] to i32
// CHECK4-NEXT: store i32 [[CONV7]], i32* [[LIN2]], align 4, !llvm.access.group !30
// CHECK4-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START1]], align 4, !llvm.access.group !30
// CHECK4-NEXT: [[CONV8:%.*]] = sext i32 [[TMP13]] to i64
// CHECK4-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30
// CHECK4-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !30
// CHECK4-NEXT: [[MUL9:%.*]] = mul i64 [[TMP14]], [[TMP15]]
// CHECK4-NEXT: [[ADD10:%.*]] = add i64 [[CONV8]], [[MUL9]]
// CHECK4-NEXT: [[CONV11:%.*]] = trunc i64 [[ADD10]] to i32
// CHECK4-NEXT: store i32 [[CONV11]], i32* [[A3]], align 4, !llvm.access.group !30
// CHECK4-NEXT: [[TMP16:%.*]] = load i16, i16* [[CONV]], align 4, !llvm.access.group !30
// CHECK4-NEXT: [[CONV12:%.*]] = sext i16 [[TMP16]] to i32
// CHECK4-NEXT: [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1
// CHECK4-NEXT: [[CONV14:%.*]] = trunc i32 [[ADD13]] to i16
// CHECK4-NEXT: store i16 [[CONV14]], i16* [[CONV]], align 4, !llvm.access.group !30
// CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK4: omp.body.continue:
// CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK4: omp.inner.for.inc:
// CHECK4-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30
// CHECK4-NEXT: [[ADD15:%.*]] = add i64 [[TMP17]], 1
// CHECK4-NEXT: store i64 [[ADD15]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]]
// CHECK4: omp.inner.for.end:
// CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK4: omp.loop.exit:
// CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
// CHECK4-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
// CHECK4-NEXT: br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK4: .omp.final.then:
// CHECK4-NEXT: store i64 400, i64* [[IT]], align 8
// CHECK4-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK4: .omp.final.done:
// CHECK4-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0
// CHECK4-NEXT: br i1 [[TMP21]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
// CHECK4: .omp.linear.pu:
// CHECK4-NEXT: [[TMP22:%.*]] = load i32, i32* [[LIN2]], align 4
// CHECK4-NEXT: store i32 [[TMP22]], i32* [[LIN_ADDR]], align 4
// CHECK4-NEXT: [[TMP23:%.*]] = load i32, i32* [[A3]], align 4
// CHECK4-NEXT: store i32 [[TMP23]], i32* [[A_ADDR]], align 4
// CHECK4-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]]
// CHECK4: .omp.linear.pu.done:
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116
// CHECK4-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]]) #[[ATTR2]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK4-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK4-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK4-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK4-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK4-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK4-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2
// CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]])
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]]) #[[ATTR3]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[TMP:%.*]] = alloca i16, align 2
// CHECK4-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[IT:%.*]] = alloca i16, align 2
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK4-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK4-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK4-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 3, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK4-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3
// CHECK4-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK4: cond.true:
// CHECK4-NEXT: br label [[COND_END:%.*]]
// CHECK4: cond.false:
// CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: br label [[COND_END]]
// CHECK4: cond.end:
// CHECK4-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK4-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK4: omp.inner.for.cond:
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
// CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !33
// CHECK4-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK4-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK4: omp.inner.for.body:
// CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
// CHECK4-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]]
// CHECK4-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD]] to i16
// CHECK4-NEXT: store i16 [[CONV2]], i16* [[IT]], align 2, !llvm.access.group !33
// CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !33
// CHECK4-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK4-NEXT: store i32 [[ADD3]], i32* [[A_ADDR]], align 4, !llvm.access.group !33
// CHECK4-NEXT: [[TMP9:%.*]] = load i16, i16* [[CONV]], align 4, !llvm.access.group !33
// CHECK4-NEXT: [[CONV4:%.*]] = sext i16 [[TMP9]] to i32
// CHECK4-NEXT: [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
// CHECK4-NEXT: [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
// CHECK4-NEXT: store i16 [[CONV6]], i16* [[CONV]], align 4, !llvm.access.group !33
// CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK4: omp.body.continue:
// CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK4: omp.inner.for.inc:
// CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
// CHECK4-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK4-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]]
// CHECK4: omp.inner.for.end:
// CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK4: omp.loop.exit:
// CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
// CHECK4-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK4: .omp.final.then:
// CHECK4-NEXT: store i16 22, i16* [[IT]], align 2
// CHECK4-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK4: .omp.final.done:
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140
// CHECK4-SAME: (i32 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], double* nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 4 dereferenceable(12) [[D:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
// CHECK4-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4
// CHECK4-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
// CHECK4-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4
// CHECK4-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
// CHECK4-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK4-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
// CHECK4-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK4-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4
// CHECK4-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK4-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK4-NEXT: store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
// CHECK4-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4
// CHECK4-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
// CHECK4-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
// CHECK4-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
// CHECK4-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
// CHECK4-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
// CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK4-NEXT: store i32 [[TMP8]], i32* [[A_CASTED]], align 4
// CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK4-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x float]*, i32, float*, [5 x [10 x double]]*, i32, i32, double*, %struct.TT*, i32)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP9]], [10 x float]* [[TMP0]], i32 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i32 [[TMP11]])
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..7
// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], double* nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 4 dereferenceable(12) [[D:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
// CHECK4-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4
// CHECK4-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
// CHECK4-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4
// CHECK4-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
// CHECK4-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[TMP:%.*]] = alloca i8, align 1
// CHECK4-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[IT:%.*]] = alloca i8, align 1
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK4-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK4-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
// CHECK4-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK4-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4
// CHECK4-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK4-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK4-NEXT: store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
// CHECK4-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4
// CHECK4-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
// CHECK4-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
// CHECK4-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
// CHECK4-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
// CHECK4-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 25, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK4-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
// CHECK4-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]])
// CHECK4-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK4: omp.dispatch.cond:
// CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25
// CHECK4-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK4: cond.true:
// CHECK4-NEXT: br label [[COND_END:%.*]]
// CHECK4: cond.false:
// CHECK4-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: br label [[COND_END]]
// CHECK4: cond.end:
// CHECK4-NEXT: [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK4-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
// CHECK4-NEXT: br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK4: omp.dispatch.body:
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK4: omp.inner.for.cond:
// CHECK4-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
// CHECK4-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !36
// CHECK4-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
// CHECK4-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK4: omp.inner.for.body:
// CHECK4-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
// CHECK4-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
// CHECK4-NEXT: [[SUB:%.*]] = sub nsw i32 122, [[MUL]]
// CHECK4-NEXT: [[CONV:%.*]] = trunc i32 [[SUB]] to i8
// CHECK4-NEXT: store i8 [[CONV]], i8* [[IT]], align 1, !llvm.access.group !36
// CHECK4-NEXT: [[TMP19:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !36
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK4-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4, !llvm.access.group !36
// CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i32 0, i32 2
// CHECK4-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !36
// CHECK4-NEXT: [[CONV7:%.*]] = fpext float [[TMP20]] to double
// CHECK4-NEXT: [[ADD8:%.*]] = fadd double [[CONV7]], 1.000000e+00
// CHECK4-NEXT: [[CONV9:%.*]] = fptrunc double [[ADD8]] to float
// CHECK4-NEXT: store float [[CONV9]], float* [[ARRAYIDX]], align 4, !llvm.access.group !36
// CHECK4-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[TMP2]], i32 3
// CHECK4-NEXT: [[TMP21:%.*]] = load float, float* [[ARRAYIDX10]], align 4, !llvm.access.group !36
// CHECK4-NEXT: [[CONV11:%.*]] = fpext float [[TMP21]] to double
// CHECK4-NEXT: [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00
// CHECK4-NEXT: [[CONV13:%.*]] = fptrunc double [[ADD12]] to float
// CHECK4-NEXT: store float [[CONV13]], float* [[ARRAYIDX10]], align 4, !llvm.access.group !36
// CHECK4-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i32 0, i32 1
// CHECK4-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX14]], i32 0, i32 2
// CHECK4-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX15]], align 8, !llvm.access.group !36
// CHECK4-NEXT: [[ADD16:%.*]] = fadd double [[TMP22]], 1.000000e+00
// CHECK4-NEXT: store double [[ADD16]], double* [[ARRAYIDX15]], align 8, !llvm.access.group !36
// CHECK4-NEXT: [[TMP23:%.*]] = mul nsw i32 1, [[TMP5]]
// CHECK4-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds double, double* [[TMP6]], i32 [[TMP23]]
// CHECK4-NEXT: [[ARRAYIDX18:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX17]], i32 3
// CHECK4-NEXT: [[TMP24:%.*]] = load double, double* [[ARRAYIDX18]], align 8, !llvm.access.group !36
// CHECK4-NEXT: [[ADD19:%.*]] = fadd double [[TMP24]], 1.000000e+00
// CHECK4-NEXT: store double [[ADD19]], double* [[ARRAYIDX18]], align 8, !llvm.access.group !36
// CHECK4-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
// CHECK4-NEXT: [[TMP25:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group !36
// CHECK4-NEXT: [[ADD20:%.*]] = add nsw i64 [[TMP25]], 1
// CHECK4-NEXT: store i64 [[ADD20]], i64* [[X]], align 4, !llvm.access.group !36
// CHECK4-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
// CHECK4-NEXT: [[TMP26:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group !36
// CHECK4-NEXT: [[CONV21:%.*]] = sext i8 [[TMP26]] to i32
// CHECK4-NEXT: [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1
// CHECK4-NEXT: [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8
// CHECK4-NEXT: store i8 [[CONV23]], i8* [[Y]], align 4, !llvm.access.group !36
// CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK4: omp.body.continue:
// CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK4: omp.inner.for.inc:
// CHECK4-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
// CHECK4-NEXT: [[ADD24:%.*]] = add nsw i32 [[TMP27]], 1
// CHECK4-NEXT: store i32 [[ADD24]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP37:![0-9]+]]
// CHECK4: omp.inner.for.end:
// CHECK4-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK4: omp.dispatch.inc:
// CHECK4-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK4-NEXT: [[ADD25:%.*]] = add nsw i32 [[TMP28]], [[TMP29]]
// CHECK4-NEXT: store i32 [[ADD25]], i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK4-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP30]], [[TMP31]]
// CHECK4-NEXT: store i32 [[ADD26]], i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK4: omp.dispatch.end:
// CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]])
// CHECK4-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0
// CHECK4-NEXT: br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK4: .omp.final.then:
// CHECK4-NEXT: store i8 96, i8* [[IT]], align 1
// CHECK4-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK4: .omp.final.done:
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@_Z3bari
// CHECK4-SAME: (i32 [[N:%.*]]) #[[ATTR0]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4
// CHECK4-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK4-NEXT: store i32 0, i32* [[A]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK4-NEXT: [[CALL:%.*]] = call i32 @_Z3fooi(i32 [[TMP0]])
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
// CHECK4-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK4-NEXT: [[CALL1:%.*]] = call i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 4 dereferenceable(8) [[S]], i32 [[TMP2]])
// CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
// CHECK4-NEXT: store i32 [[ADD2]], i32* [[A]], align 4
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK4-NEXT: [[CALL3:%.*]] = call i32 @_ZL7fstatici(i32 [[TMP4]])
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
// CHECK4-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK4-NEXT: [[CALL5:%.*]] = call i32 @_Z9ftemplateIiET_i(i32 [[TMP6]])
// CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
// CHECK4-NEXT: store i32 [[ADD6]], i32* [[A]], align 4
// CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT: ret i32 [[TMP8]]
//
//
// CHECK4-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
// CHECK4-SAME: (%struct.S1* nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 [[N:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK4-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[B:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4
// CHECK4-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4
// CHECK4-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK4-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK4-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK4-NEXT: store i32 [[ADD]], i32* [[B]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK4-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
// CHECK4-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]]
// CHECK4-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2
// CHECK4-NEXT: store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[B]], align 4
// CHECK4-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4
// CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 60
// CHECK4-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK4: omp_if.then:
// CHECK4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK4-NEXT: [[TMP7:%.*]] = mul nuw i32 2, [[TMP1]]
// CHECK4-NEXT: [[TMP8:%.*]] = mul nuw i32 [[TMP7]], 2
// CHECK4-NEXT: [[TMP9:%.*]] = sext i32 [[TMP8]] to i64
// CHECK4-NEXT: [[TMP10:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to %struct.S1**
// CHECK4-NEXT: store %struct.S1* [[THIS1]], %struct.S1** [[TMP11]], align 4
// CHECK4-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double**
// CHECK4-NEXT: store double* [[A]], double** [[TMP13]], align 4
// CHECK4-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK4-NEXT: store i64 8, i64* [[TMP14]], align 4
// CHECK4-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK4-NEXT: store i8* null, i8** [[TMP15]], align 4
// CHECK4-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK4-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32*
// CHECK4-NEXT: store i32 [[TMP5]], i32* [[TMP17]], align 4
// CHECK4-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK4-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32*
// CHECK4-NEXT: store i32 [[TMP5]], i32* [[TMP19]], align 4
// CHECK4-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
// CHECK4-NEXT: store i64 4, i64* [[TMP20]], align 4
// CHECK4-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK4-NEXT: store i8* null, i8** [[TMP21]], align 4
// CHECK4-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK4-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32*
// CHECK4-NEXT: store i32 2, i32* [[TMP23]], align 4
// CHECK4-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK4-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32*
// CHECK4-NEXT: store i32 2, i32* [[TMP25]], align 4
// CHECK4-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
// CHECK4-NEXT: store i64 4, i64* [[TMP26]], align 4
// CHECK4-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
// CHECK4-NEXT: store i8* null, i8** [[TMP27]], align 4
// CHECK4-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK4-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32*
// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP29]], align 4
// CHECK4-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK4-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i32*
// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP31]], align 4
// CHECK4-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
// CHECK4-NEXT: store i64 4, i64* [[TMP32]], align 4
// CHECK4-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
// CHECK4-NEXT: store i8* null, i8** [[TMP33]], align 4
// CHECK4-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
// CHECK4-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16**
// CHECK4-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 4
// CHECK4-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
// CHECK4-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16**
// CHECK4-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 4
// CHECK4-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
// CHECK4-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 4
// CHECK4-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4
// CHECK4-NEXT: store i8* null, i8** [[TMP39]], align 4
// CHECK4-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK4-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK4-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0
// CHECK4-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK4: omp_offload.failed:
// CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR4]]
// CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK4: omp_offload.cont:
// CHECK4-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK4: omp_if.else:
// CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR4]]
// CHECK4-NEXT: br label [[OMP_IF_END]]
// CHECK4: omp_if.end:
// CHECK4-NEXT: [[TMP45:%.*]] = mul nsw i32 1, [[TMP1]]
// CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP45]]
// CHECK4-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
// CHECK4-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2
// CHECK4-NEXT: [[CONV:%.*]] = sext i16 [[TMP46]] to i32
// CHECK4-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4
// CHECK4-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP47]]
// CHECK4-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK4-NEXT: call void @llvm.stackrestore(i8* [[TMP48]])
// CHECK4-NEXT: ret i32 [[ADD3]]
//
//
// CHECK4-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK4-SAME: (i32 [[N:%.*]]) #[[ATTR0]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK4-NEXT: [[AAA:%.*]] = alloca i8, align 1
// CHECK4-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK4-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[AAA_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 4
// CHECK4-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK4-NEXT: store i32 0, i32* [[A]], align 4
// CHECK4-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK4-NEXT: store i8 0, i8* [[AAA]], align 1
// CHECK4-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
// CHECK4-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK4-NEXT: store i16 [[TMP2]], i16* [[CONV]], align 2
// CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK4-NEXT: [[TMP4:%.*]] = load i8, i8* [[AAA]], align 1
// CHECK4-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_CASTED]] to i8*
// CHECK4-NEXT: store i8 [[TMP4]], i8* [[CONV1]], align 1
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[AAA_CASTED]], align 4
// CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 50
// CHECK4-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK4: omp_if.then:
// CHECK4-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32*
// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP8]], align 4
// CHECK4-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i32*
// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP10]], align 4
// CHECK4-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK4-NEXT: store i8* null, i8** [[TMP11]], align 4
// CHECK4-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK4-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32*
// CHECK4-NEXT: store i32 [[TMP3]], i32* [[TMP13]], align 4
// CHECK4-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK4-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32*
// CHECK4-NEXT: store i32 [[TMP3]], i32* [[TMP15]], align 4
// CHECK4-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK4-NEXT: store i8* null, i8** [[TMP16]], align 4
// CHECK4-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK4-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32*
// CHECK4-NEXT: store i32 [[TMP5]], i32* [[TMP18]], align 4
// CHECK4-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK4-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32*
// CHECK4-NEXT: store i32 [[TMP5]], i32* [[TMP20]], align 4
// CHECK4-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
// CHECK4-NEXT: store i8* null, i8** [[TMP21]], align 4
// CHECK4-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK4-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to [10 x i32]**
// CHECK4-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP23]], align 4
// CHECK4-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK4-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to [10 x i32]**
// CHECK4-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP25]], align 4
// CHECK4-NEXT: [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
// CHECK4-NEXT: store i8* null, i8** [[TMP26]], align 4
// CHECK4-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK4-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
// CHECK4-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK4: omp_offload.failed:
// CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK4: omp_offload.cont:
// CHECK4-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK4: omp_if.else:
// CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK4-NEXT: br label [[OMP_IF_END]]
// CHECK4: omp_if.end:
// CHECK4-NEXT: [[TMP31:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT: ret i32 [[TMP31]]
//
//
// CHECK4-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK4-SAME: (i32 [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK4-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK4-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4
// CHECK4-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK4-NEXT: store i32 0, i32* [[A]], align 4
// CHECK4-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK4-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
// CHECK4-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK4-NEXT: store i16 [[TMP2]], i16* [[CONV]], align 2
// CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40
// CHECK4-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK4: omp_if.then:
// CHECK4-NEXT: [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32*
// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP6]], align 4
// CHECK4-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32*
// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP8]], align 4
// CHECK4-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK4-NEXT: store i8* null, i8** [[TMP9]], align 4
// CHECK4-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK4-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i32*
// CHECK4-NEXT: store i32 [[TMP3]], i32* [[TMP11]], align 4
// CHECK4-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK4-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32*
// CHECK4-NEXT: store i32 [[TMP3]], i32* [[TMP13]], align 4
// CHECK4-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK4-NEXT: store i8* null, i8** [[TMP14]], align 4
// CHECK4-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK4-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to [10 x i32]**
// CHECK4-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP16]], align 4
// CHECK4-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK4-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [10 x i32]**
// CHECK4-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP18]], align 4
// CHECK4-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
// CHECK4-NEXT: store i8* null, i8** [[TMP19]], align 4
// CHECK4-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.15, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK4-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
// CHECK4-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK4: omp_offload.failed:
// CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178(i32 [[TMP1]], i32 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK4: omp_offload.cont:
// CHECK4-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK4: omp_if.else:
// CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178(i32 [[TMP1]], i32 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK4-NEXT: br label [[OMP_IF_END]]
// CHECK4: omp_if.end:
// CHECK4-NEXT: [[TMP24:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT: ret i32 [[TMP24]]
//
//
// CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216
// CHECK4-SAME: (%struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK4-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4
// CHECK4-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK4-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4
// CHECK4-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK4-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK4-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK4-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4
// CHECK4-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]])
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..9
// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR3]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK4-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4
// CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK4-NEXT: [[TMP:%.*]] = alloca i64, align 4
// CHECK4-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK4-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK4-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK4-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4
// CHECK4-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK4-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK4-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK4-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
// CHECK4-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK4-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK4-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK4-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK4-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK4-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP6]], 3
// CHECK4-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK4: cond.true:
// CHECK4-NEXT: br label [[COND_END:%.*]]
// CHECK4: cond.false:
// CHECK4-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK4-NEXT: br label [[COND_END]]
// CHECK4: cond.end:
// CHECK4-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
// CHECK4-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK4-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK4-NEXT: store i64 [[TMP8]], i64* [[DOTOMP_IV]], align 8
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK4: omp.inner.for.cond:
// CHECK4-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !39
// CHECK4-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !39
// CHECK4-NEXT: [[CMP3:%.*]] = icmp ule i64 [[TMP9]], [[TMP10]]
// CHECK4-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK4: omp.inner.for.body:
// CHECK4-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !39
// CHECK4-NEXT: [[MUL:%.*]] = mul i64 [[TMP11]], 400
// CHECK4-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK4-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !39
// CHECK4-NEXT: [[TMP12:%.*]] = load i32, i32* [[B_ADDR]], align 4, !llvm.access.group !39
// CHECK4-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP12]] to double
// CHECK4-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK4-NEXT: store double [[ADD]], double* [[A]], align 4, !llvm.access.group !39
// CHECK4-NEXT: [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK4-NEXT: [[TMP13:%.*]] = load double, double* [[A4]], align 4, !llvm.access.group !39
// CHECK4-NEXT: [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00
// CHECK4-NEXT: store double [[INC]], double* [[A4]], align 4, !llvm.access.group !39
// CHECK4-NEXT: [[CONV5:%.*]] = fptosi double [[INC]] to i16
// CHECK4-NEXT: [[TMP14:%.*]] = mul nsw i32 1, [[TMP2]]
// CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP14]]
// CHECK4-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
// CHECK4-NEXT: store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2, !llvm.access.group !39
// CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK4: omp.body.continue:
// CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK4: omp.inner.for.inc:
// CHECK4-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !39
// CHECK4-NEXT: [[ADD7:%.*]] = add i64 [[TMP15]], 1
// CHECK4-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !39
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP40:![0-9]+]]
// CHECK4: omp.inner.for.end:
// CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK4: omp.loop.exit:
// CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
// CHECK4-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
// CHECK4-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 6344 // CHECK4: .omp.final.then: 6345 // CHECK4-NEXT: store i64 400, i64* [[IT]], align 8 6346 // CHECK4-NEXT: br label [[DOTOMP_FINAL_DONE]] 6347 // CHECK4: .omp.final.done: 6348 // CHECK4-NEXT: ret void 6349 // 6350 // 6351 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195 6352 // CHECK4-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] { 6353 // CHECK4-NEXT: entry: 6354 // CHECK4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 6355 // CHECK4-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 6356 // CHECK4-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4 6357 // CHECK4-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4 6358 // CHECK4-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 6359 // CHECK4-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 6360 // CHECK4-NEXT: [[AAA_CASTED:%.*]] = alloca i32, align 4 6361 // CHECK4-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 6362 // CHECK4-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 6363 // CHECK4-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4 6364 // CHECK4-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4 6365 // CHECK4-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* 6366 // CHECK4-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8* 6367 // CHECK4-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4 6368 // CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4 6369 // CHECK4-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4 6370 // CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4 6371 // CHECK4-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 4 6372 // CHECK4-NEXT: [[CONV2:%.*]] = bitcast i32* [[AA_CASTED]] to i16* 6373 // CHECK4-NEXT: store i16 [[TMP3]], i16* [[CONV2]], align 2 6374 // CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4 6375 // CHECK4-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV1]], align 4 6376 // CHECK4-NEXT: [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8* 6377 // CHECK4-NEXT: store i8 [[TMP5]], i8* [[CONV3]], align 1 6378 // CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4 6379 // CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]]) 6380 // CHECK4-NEXT: ret void 6381 // 6382 // 6383 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..11 6384 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { 6385 // CHECK4-NEXT: entry: 6386 // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 6387 // CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 6388 // CHECK4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 6389 // CHECK4-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 6390 // CHECK4-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4 6391 // CHECK4-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4 6392 // CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 6393 // CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4 6394 // CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 6395 // CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 6396 // CHECK4-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 6397 // CHECK4-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 6398 // CHECK4-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4 6399 // CHECK4-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4 6400 // CHECK4-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* 6401 // CHECK4-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8* 6402 // CHECK4-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4 6403 // CHECK4-NEXT: ret void 6404 // 6405 // 6406 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178 6407 // CHECK4-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] { 6408 // CHECK4-NEXT: entry: 6409 // CHECK4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 6410 // CHECK4-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 6411 // CHECK4-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4 6412 // CHECK4-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 6413 // CHECK4-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 6414 // CHECK4-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 6415 // CHECK4-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 6416 // CHECK4-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4 6417 // CHECK4-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* 6418 // CHECK4-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4 6419 // CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4 6420 // CHECK4-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4 6421 // CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4 6422 // CHECK4-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 4 6423 // CHECK4-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* 6424 // CHECK4-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2 6425 // CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4 6426 // CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) 6427 // CHECK4-NEXT: ret void 6428 // 6429 // 6430 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..14 6431 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { 6432 // CHECK4-NEXT: entry: 6433 // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 6434 // CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 6435 // CHECK4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 6436 // CHECK4-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 6437 // CHECK4-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4 6438 // CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 6439 // CHECK4-NEXT: [[TMP:%.*]] = alloca i64, align 4 6440 // CHECK4-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 6441 // CHECK4-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 6442 // CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 6443 // CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 6444 // CHECK4-NEXT: [[I:%.*]] = alloca i64, align 8 6445 // CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 6446 // CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 6447 // CHECK4-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 6448 // CHECK4-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 6449 // CHECK4-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4 6450 // CHECK4-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* 6451 // CHECK4-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4 6452 // CHECK4-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 6453 // CHECK4-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8 6454 // CHECK4-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 6455 // CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 6456 // CHECK4-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 6457 // CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 6458 // CHECK4-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) 6459 // CHECK4-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 6460 // CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6 6461 // CHECK4-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 6462 // CHECK4: cond.true: 6463 // CHECK4-NEXT: br label [[COND_END:%.*]] 6464 // CHECK4: cond.false: 6465 // CHECK4-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 6466 // CHECK4-NEXT: br label [[COND_END]] 6467 // CHECK4: cond.end: 6468 // CHECK4-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 6469 // CHECK4-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 6470 // CHECK4-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 6471 // CHECK4-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8 6472 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 6473 // CHECK4: omp.inner.for.cond: 6474 // CHECK4-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !42 6475 // CHECK4-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !42 6476 // CHECK4-NEXT: 
[[CMP1:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]] 6477 // CHECK4-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 6478 // CHECK4: omp.inner.for.body: 6479 // CHECK4-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !42 6480 // CHECK4-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3 6481 // CHECK4-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]] 6482 // CHECK4-NEXT: store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !42 6483 // CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !42 6484 // CHECK4-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1 6485 // CHECK4-NEXT: store i32 [[ADD2]], i32* [[A_ADDR]], align 4, !llvm.access.group !42 6486 // CHECK4-NEXT: [[TMP10:%.*]] = load i16, i16* [[CONV]], align 4, !llvm.access.group !42 6487 // CHECK4-NEXT: [[CONV3:%.*]] = sext i16 [[TMP10]] to i32 6488 // CHECK4-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1 6489 // CHECK4-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16 6490 // CHECK4-NEXT: store i16 [[CONV5]], i16* [[CONV]], align 4, !llvm.access.group !42 6491 // CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2 6492 // CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !42 6493 // CHECK4-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP11]], 1 6494 // CHECK4-NEXT: store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !42 6495 // CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 6496 // CHECK4: omp.body.continue: 6497 // CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 6498 // CHECK4: omp.inner.for.inc: 6499 // CHECK4-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !42 6500 // CHECK4-NEXT: [[ADD7:%.*]] = add nsw i64 [[TMP12]], 1 6501 // CHECK4-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !42 6502 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP43:![0-9]+]] 6503 // CHECK4: omp.inner.for.end: 6504 // CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 6505 // CHECK4: omp.loop.exit: 6506 // CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 6507 // CHECK4-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 6508 // CHECK4-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 6509 // CHECK4-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 6510 // CHECK4: .omp.final.then: 6511 // CHECK4-NEXT: store i64 11, i64* [[I]], align 8 6512 // CHECK4-NEXT: br label [[DOTOMP_FINAL_DONE]] 6513 // CHECK4: .omp.final.done: 6514 // CHECK4-NEXT: ret void 6515 // 6516 // 6517 // CHECK4-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg 6518 // CHECK4-SAME: () #[[ATTR7:[0-9]+]] { 6519 // CHECK4-NEXT: entry: 6520 // CHECK4-NEXT: call void @__tgt_register_requires(i64 1) 6521 // CHECK4-NEXT: ret void 6522 // 6523 // 6524 // CHECK5-LABEL: define {{[^@]+}}@_Z7get_valv 6525 // CHECK5-SAME: () #[[ATTR0:[0-9]+]] { 6526 // CHECK5-NEXT: entry: 6527 // CHECK5-NEXT: ret i64 0 6528 // 6529 // 6530 // CHECK5-LABEL: define {{[^@]+}}@_Z3fooi 6531 // CHECK5-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] { 6532 // CHECK5-NEXT: entry: 6533 // CHECK5-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 6534 // CHECK5-NEXT: [[A:%.*]] = alloca i32, align 4 6535 // CHECK5-NEXT: [[AA:%.*]] = alloca i16, align 2 6536 // CHECK5-NEXT: [[B:%.*]] = alloca [10 x float], align 4 6537 // CHECK5-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8 6538 // CHECK5-NEXT: [[__VLA_EXPR0:%.*]] = alloca 
i64, align 8 6539 // CHECK5-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8 6540 // CHECK5-NEXT: [[__VLA_EXPR1:%.*]] = alloca i64, align 8 6541 // CHECK5-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8 6542 // CHECK5-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1 6543 // CHECK5-NEXT: [[K:%.*]] = alloca i64, align 8 6544 // CHECK5-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 6545 // CHECK5-NEXT: [[K_CASTED:%.*]] = alloca i64, align 8 6546 // CHECK5-NEXT: [[LIN:%.*]] = alloca i32, align 4 6547 // CHECK5-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 6548 // CHECK5-NEXT: [[LIN_CASTED:%.*]] = alloca i64, align 8 6549 // CHECK5-NEXT: [[A_CASTED4:%.*]] = alloca i64, align 8 6550 // CHECK5-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 6551 // CHECK5-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 6552 // CHECK5-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 6553 // CHECK5-NEXT: [[A_CASTED6:%.*]] = alloca i64, align 8 6554 // CHECK5-NEXT: [[AA_CASTED8:%.*]] = alloca i64, align 8 6555 // CHECK5-NEXT: [[DOTOFFLOAD_BASEPTRS10:%.*]] = alloca [2 x i8*], align 8 6556 // CHECK5-NEXT: [[DOTOFFLOAD_PTRS11:%.*]] = alloca [2 x i8*], align 8 6557 // CHECK5-NEXT: [[DOTOFFLOAD_MAPPERS12:%.*]] = alloca [2 x i8*], align 8 6558 // CHECK5-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 6559 // CHECK5-NEXT: [[A_CASTED15:%.*]] = alloca i64, align 8 6560 // CHECK5-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 6561 // CHECK5-NEXT: [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [10 x i8*], align 8 6562 // CHECK5-NEXT: [[DOTOFFLOAD_PTRS21:%.*]] = alloca [10 x i8*], align 8 6563 // CHECK5-NEXT: [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [10 x i8*], align 8 6564 // CHECK5-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 8 6565 // CHECK5-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) 6566 // CHECK5-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 6567 // CHECK5-NEXT: store i32 0, i32* [[A]], align 4 6568 // CHECK5-NEXT: store i16 0, i16* [[AA]], align 2 6569 // CHECK5-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 6570 // CHECK5-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 6571 // CHECK5-NEXT: [[TMP3:%.*]] = call i8* @llvm.stacksave() 6572 // CHECK5-NEXT: store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8 6573 // CHECK5-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP2]], align 4 6574 // CHECK5-NEXT: store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8 6575 // CHECK5-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 6576 // CHECK5-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64 6577 // CHECK5-NEXT: [[TMP6:%.*]] = mul nuw i64 5, [[TMP5]] 6578 // CHECK5-NEXT: [[VLA1:%.*]] = alloca double, i64 [[TMP6]], align 8 6579 // CHECK5-NEXT: store i64 [[TMP5]], i64* [[__VLA_EXPR1]], align 8 6580 // CHECK5-NEXT: [[TMP7:%.*]] = call i8* @__kmpc_omp_target_task_alloc(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i64 40, i64 1, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. 
to i32 (i32, i8*)*), i64 -1) 6581 // CHECK5-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.kmp_task_t_with_privates* 6582 // CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP8]], i32 0, i32 0 6583 // CHECK5-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i8* [[TMP7]]) 6584 // CHECK5-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv() 6585 // CHECK5-NEXT: store i64 [[CALL]], i64* [[K]], align 8 6586 // CHECK5-NEXT: [[TMP11:%.*]] = load i32, i32* [[A]], align 4 6587 // CHECK5-NEXT: [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32* 6588 // CHECK5-NEXT: store i32 [[TMP11]], i32* [[CONV]], align 4 6589 // CHECK5-NEXT: [[TMP12:%.*]] = load i64, i64* [[A_CASTED]], align 8 6590 // CHECK5-NEXT: [[TMP13:%.*]] = load i64, i64* [[K]], align 8 6591 // CHECK5-NEXT: store i64 [[TMP13]], i64* [[K_CASTED]], align 8 6592 // CHECK5-NEXT: [[TMP14:%.*]] = load i64, i64* [[K_CASTED]], align 8 6593 // CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101(i64 [[TMP12]], i64 [[TMP14]]) #[[ATTR4:[0-9]+]] 6594 // CHECK5-NEXT: store i32 12, i32* [[LIN]], align 4 6595 // CHECK5-NEXT: [[TMP15:%.*]] = load i16, i16* [[AA]], align 2 6596 // CHECK5-NEXT: [[CONV2:%.*]] = bitcast i64* [[AA_CASTED]] to i16* 6597 // CHECK5-NEXT: store i16 [[TMP15]], i16* [[CONV2]], align 2 6598 // CHECK5-NEXT: [[TMP16:%.*]] = load i64, i64* [[AA_CASTED]], align 8 6599 // CHECK5-NEXT: [[TMP17:%.*]] = load i32, i32* [[LIN]], align 4 6600 // CHECK5-NEXT: [[CONV3:%.*]] = bitcast i64* [[LIN_CASTED]] to i32* 6601 // CHECK5-NEXT: store i32 [[TMP17]], i32* [[CONV3]], align 4 6602 // CHECK5-NEXT: [[TMP18:%.*]] = load i64, i64* [[LIN_CASTED]], align 8 6603 // CHECK5-NEXT: [[TMP19:%.*]] = load i32, i32* [[A]], align 4 6604 // CHECK5-NEXT: [[CONV5:%.*]] = bitcast i64* [[A_CASTED4]] to i32* 6605 // CHECK5-NEXT: store i32 [[TMP19]], i32* [[CONV5]], align 4 6606 // CHECK5-NEXT: [[TMP20:%.*]] = load i64, i64* [[A_CASTED4]], align 8 6607 // CHECK5-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 6608 // CHECK5-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i64* 6609 // CHECK5-NEXT: store i64 [[TMP16]], i64* [[TMP22]], align 8 6610 // CHECK5-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 6611 // CHECK5-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i64* 6612 // CHECK5-NEXT: store i64 [[TMP16]], i64* [[TMP24]], align 8 6613 // CHECK5-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 6614 // CHECK5-NEXT: store i8* null, i8** [[TMP25]], align 8 6615 // CHECK5-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 6616 // CHECK5-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64* 6617 // CHECK5-NEXT: store i64 [[TMP18]], i64* [[TMP27]], align 8 6618 // CHECK5-NEXT: [[TMP28:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 6619 // CHECK5-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64* 6620 // CHECK5-NEXT: store i64 [[TMP18]], i64* [[TMP29]], align 8 6621 // CHECK5-NEXT: [[TMP30:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 6622 // CHECK5-NEXT: store i8* null, i8** [[TMP30]], align 8 6623 // CHECK5-NEXT: [[TMP31:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 6624 // CHECK5-NEXT: 
[[TMP32:%.*]] = bitcast i8** [[TMP31]] to i64* 6625 // CHECK5-NEXT: store i64 [[TMP20]], i64* [[TMP32]], align 8 6626 // CHECK5-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 6627 // CHECK5-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i64* 6628 // CHECK5-NEXT: store i64 [[TMP20]], i64* [[TMP34]], align 8 6629 // CHECK5-NEXT: [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 6630 // CHECK5-NEXT: store i8* null, i8** [[TMP35]], align 8 6631 // CHECK5-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 6632 // CHECK5-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 6633 // CHECK5-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108.region_id, i32 3, i8** [[TMP36]], i8** [[TMP37]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) 6634 // CHECK5-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0 6635 // CHECK5-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 6636 // CHECK5: omp_offload.failed: 6637 // CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108(i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]]) #[[ATTR4]] 6638 // CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT]] 6639 // CHECK5: omp_offload.cont: 6640 // CHECK5-NEXT: [[TMP40:%.*]] = load i32, i32* [[A]], align 4 6641 // CHECK5-NEXT: [[CONV7:%.*]] = bitcast i64* [[A_CASTED6]] to i32* 6642 // CHECK5-NEXT: store i32 [[TMP40]], i32* [[CONV7]], align 4 6643 // CHECK5-NEXT: [[TMP41:%.*]] = load i64, i64* [[A_CASTED6]], align 8 6644 // CHECK5-NEXT: [[TMP42:%.*]] = load i16, i16* [[AA]], align 2 6645 // CHECK5-NEXT: [[CONV9:%.*]] = bitcast i64* [[AA_CASTED8]] to i16* 6646 // CHECK5-NEXT: store i16 [[TMP42]], i16* [[CONV9]], align 2 6647 // CHECK5-NEXT: [[TMP43:%.*]] = load i64, i64* [[AA_CASTED8]], align 8 6648 // CHECK5-NEXT: [[TMP44:%.*]] = load i32, i32* [[N_ADDR]], align 4 6649 // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP44]], 10 6650 // CHECK5-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 6651 // CHECK5: omp_if.then: 6652 // CHECK5-NEXT: [[TMP45:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0 6653 // CHECK5-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i64* 6654 // CHECK5-NEXT: store i64 [[TMP41]], i64* [[TMP46]], align 8 6655 // CHECK5-NEXT: [[TMP47:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0 6656 // CHECK5-NEXT: [[TMP48:%.*]] = bitcast i8** [[TMP47]] to i64* 6657 // CHECK5-NEXT: store i64 [[TMP41]], i64* [[TMP48]], align 8 6658 // CHECK5-NEXT: [[TMP49:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS12]], i64 0, i64 0 6659 // CHECK5-NEXT: store i8* null, i8** [[TMP49]], align 8 6660 // CHECK5-NEXT: [[TMP50:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 1 6661 // CHECK5-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i64* 6662 // CHECK5-NEXT: store i64 [[TMP43]], i64* [[TMP51]], align 8 6663 // CHECK5-NEXT: [[TMP52:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 1 6664 // CHECK5-NEXT: [[TMP53:%.*]] = 
bitcast i8** [[TMP52]] to i64* 6665 // CHECK5-NEXT: store i64 [[TMP43]], i64* [[TMP53]], align 8 6666 // CHECK5-NEXT: [[TMP54:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS12]], i64 0, i64 1 6667 // CHECK5-NEXT: store i8* null, i8** [[TMP54]], align 8 6668 // CHECK5-NEXT: [[TMP55:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0 6669 // CHECK5-NEXT: [[TMP56:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0 6670 // CHECK5-NEXT: [[TMP57:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116.region_id, i32 2, i8** [[TMP55]], i8** [[TMP56]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) 6671 // CHECK5-NEXT: [[TMP58:%.*]] = icmp ne i32 [[TMP57]], 0 6672 // CHECK5-NEXT: br i1 [[TMP58]], label [[OMP_OFFLOAD_FAILED13:%.*]], label [[OMP_OFFLOAD_CONT14:%.*]] 6673 // CHECK5: omp_offload.failed13: 6674 // CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116(i64 [[TMP41]], i64 [[TMP43]]) #[[ATTR4]] 6675 // CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT14]] 6676 // CHECK5: omp_offload.cont14: 6677 // CHECK5-NEXT: br label [[OMP_IF_END:%.*]] 6678 // CHECK5: omp_if.else: 6679 // CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116(i64 [[TMP41]], i64 [[TMP43]]) #[[ATTR4]] 6680 // CHECK5-NEXT: br label [[OMP_IF_END]] 6681 // CHECK5: omp_if.end: 6682 // CHECK5-NEXT: [[TMP59:%.*]] = load i32, i32* [[A]], align 4 6683 // CHECK5-NEXT: store i32 [[TMP59]], i32* [[DOTCAPTURE_EXPR_]], align 4 6684 // CHECK5-NEXT: [[TMP60:%.*]] = load i32, i32* [[A]], align 4 6685 // CHECK5-NEXT: [[CONV16:%.*]] = bitcast i64* [[A_CASTED15]] to i32* 6686 // CHECK5-NEXT: store i32 [[TMP60]], i32* [[CONV16]], align 4 6687 // CHECK5-NEXT: [[TMP61:%.*]] = load i64, i64* [[A_CASTED15]], align 8 6688 // CHECK5-NEXT: [[TMP62:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 6689 // CHECK5-NEXT: [[CONV17:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* 6690 // CHECK5-NEXT: store i32 [[TMP62]], i32* [[CONV17]], align 4 6691 // CHECK5-NEXT: [[TMP63:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 6692 // CHECK5-NEXT: [[TMP64:%.*]] = load i32, i32* [[N_ADDR]], align 4 6693 // CHECK5-NEXT: [[CMP18:%.*]] = icmp sgt i32 [[TMP64]], 20 6694 // CHECK5-NEXT: br i1 [[CMP18]], label [[OMP_IF_THEN19:%.*]], label [[OMP_IF_ELSE25:%.*]] 6695 // CHECK5: omp_if.then19: 6696 // CHECK5-NEXT: [[TMP65:%.*]] = mul nuw i64 [[TMP2]], 4 6697 // CHECK5-NEXT: [[TMP66:%.*]] = mul nuw i64 5, [[TMP5]] 6698 // CHECK5-NEXT: [[TMP67:%.*]] = mul nuw i64 [[TMP66]], 8 6699 // CHECK5-NEXT: [[TMP68:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 6700 // CHECK5-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i64* 6701 // CHECK5-NEXT: store i64 [[TMP61]], i64* [[TMP69]], align 8 6702 // CHECK5-NEXT: [[TMP70:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 6703 // CHECK5-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i64* 6704 // CHECK5-NEXT: store i64 [[TMP61]], i64* [[TMP71]], align 8 6705 // CHECK5-NEXT: [[TMP72:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 6706 // CHECK5-NEXT: store i64 4, i64* [[TMP72]], align 8 6707 // 
CHECK5-NEXT: [[TMP73:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0 6708 // CHECK5-NEXT: store i8* null, i8** [[TMP73]], align 8 6709 // CHECK5-NEXT: [[TMP74:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1 6710 // CHECK5-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to [10 x float]** 6711 // CHECK5-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP75]], align 8 6712 // CHECK5-NEXT: [[TMP76:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1 6713 // CHECK5-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to [10 x float]** 6714 // CHECK5-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP77]], align 8 6715 // CHECK5-NEXT: [[TMP78:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 6716 // CHECK5-NEXT: store i64 40, i64* [[TMP78]], align 8 6717 // CHECK5-NEXT: [[TMP79:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1 6718 // CHECK5-NEXT: store i8* null, i8** [[TMP79]], align 8 6719 // CHECK5-NEXT: [[TMP80:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2 6720 // CHECK5-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i64* 6721 // CHECK5-NEXT: store i64 [[TMP2]], i64* [[TMP81]], align 8 6722 // CHECK5-NEXT: [[TMP82:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2 6723 // CHECK5-NEXT: [[TMP83:%.*]] = bitcast i8** [[TMP82]] to i64* 6724 // CHECK5-NEXT: store i64 [[TMP2]], i64* [[TMP83]], align 8 6725 // CHECK5-NEXT: [[TMP84:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 6726 // CHECK5-NEXT: store i64 8, i64* [[TMP84]], align 8 6727 // CHECK5-NEXT: [[TMP85:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2 6728 // CHECK5-NEXT: store i8* null, i8** [[TMP85]], align 8 6729 // CHECK5-NEXT: [[TMP86:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3 6730 // CHECK5-NEXT: [[TMP87:%.*]] = bitcast i8** [[TMP86]] to float** 6731 // CHECK5-NEXT: store float* [[VLA]], float** [[TMP87]], align 8 6732 // CHECK5-NEXT: [[TMP88:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3 6733 // CHECK5-NEXT: [[TMP89:%.*]] = bitcast i8** [[TMP88]] to float** 6734 // CHECK5-NEXT: store float* [[VLA]], float** [[TMP89]], align 8 6735 // CHECK5-NEXT: [[TMP90:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 6736 // CHECK5-NEXT: store i64 [[TMP65]], i64* [[TMP90]], align 8 6737 // CHECK5-NEXT: [[TMP91:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 3 6738 // CHECK5-NEXT: store i8* null, i8** [[TMP91]], align 8 6739 // CHECK5-NEXT: [[TMP92:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 4 6740 // CHECK5-NEXT: [[TMP93:%.*]] = bitcast i8** [[TMP92]] to [5 x [10 x double]]** 6741 // CHECK5-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP93]], align 8 6742 // CHECK5-NEXT: [[TMP94:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 4 6743 // CHECK5-NEXT: [[TMP95:%.*]] = bitcast i8** [[TMP94]] to [5 x [10 x double]]** 6744 // CHECK5-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP95]], align 8 6745 // CHECK5-NEXT: [[TMP96:%.*]] = getelementptr inbounds [10 x i64], [10 x 
i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 6746 // CHECK5-NEXT: store i64 400, i64* [[TMP96]], align 8 6747 // CHECK5-NEXT: [[TMP97:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 4 6748 // CHECK5-NEXT: store i8* null, i8** [[TMP97]], align 8 6749 // CHECK5-NEXT: [[TMP98:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 5 6750 // CHECK5-NEXT: [[TMP99:%.*]] = bitcast i8** [[TMP98]] to i64* 6751 // CHECK5-NEXT: store i64 5, i64* [[TMP99]], align 8 6752 // CHECK5-NEXT: [[TMP100:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 5 6753 // CHECK5-NEXT: [[TMP101:%.*]] = bitcast i8** [[TMP100]] to i64* 6754 // CHECK5-NEXT: store i64 5, i64* [[TMP101]], align 8 6755 // CHECK5-NEXT: [[TMP102:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 6756 // CHECK5-NEXT: store i64 8, i64* [[TMP102]], align 8 6757 // CHECK5-NEXT: [[TMP103:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 5 6758 // CHECK5-NEXT: store i8* null, i8** [[TMP103]], align 8 6759 // CHECK5-NEXT: [[TMP104:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 6 6760 // CHECK5-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to i64* 6761 // CHECK5-NEXT: store i64 [[TMP5]], i64* [[TMP105]], align 8 6762 // CHECK5-NEXT: [[TMP106:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 6 6763 // CHECK5-NEXT: [[TMP107:%.*]] = bitcast i8** [[TMP106]] to i64* 6764 // CHECK5-NEXT: store i64 [[TMP5]], i64* [[TMP107]], align 8 6765 // CHECK5-NEXT: [[TMP108:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 6766 // CHECK5-NEXT: store i64 8, i64* [[TMP108]], align 8 6767 // CHECK5-NEXT: [[TMP109:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 6 6768 // CHECK5-NEXT: store i8* null, i8** [[TMP109]], align 8 6769 // CHECK5-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 7 6770 // CHECK5-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to double** 6771 // CHECK5-NEXT: store double* [[VLA1]], double** [[TMP111]], align 8 6772 // CHECK5-NEXT: [[TMP112:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 7 6773 // CHECK5-NEXT: [[TMP113:%.*]] = bitcast i8** [[TMP112]] to double** 6774 // CHECK5-NEXT: store double* [[VLA1]], double** [[TMP113]], align 8 6775 // CHECK5-NEXT: [[TMP114:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 6776 // CHECK5-NEXT: store i64 [[TMP67]], i64* [[TMP114]], align 8 6777 // CHECK5-NEXT: [[TMP115:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 7 6778 // CHECK5-NEXT: store i8* null, i8** [[TMP115]], align 8 6779 // CHECK5-NEXT: [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 8 6780 // CHECK5-NEXT: [[TMP117:%.*]] = bitcast i8** [[TMP116]] to %struct.TT** 6781 // CHECK5-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP117]], align 8 6782 // CHECK5-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 8 6783 // CHECK5-NEXT: [[TMP119:%.*]] = bitcast i8** [[TMP118]] to %struct.TT** 6784 // CHECK5-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP119]], align 8 6785 // CHECK5-NEXT: [[TMP120:%.*]] = 
getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 6786 // CHECK5-NEXT: store i64 16, i64* [[TMP120]], align 8 6787 // CHECK5-NEXT: [[TMP121:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 8 6788 // CHECK5-NEXT: store i8* null, i8** [[TMP121]], align 8 6789 // CHECK5-NEXT: [[TMP122:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 9 6790 // CHECK5-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to i64* 6791 // CHECK5-NEXT: store i64 [[TMP63]], i64* [[TMP123]], align 8 6792 // CHECK5-NEXT: [[TMP124:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 9 6793 // CHECK5-NEXT: [[TMP125:%.*]] = bitcast i8** [[TMP124]] to i64* 6794 // CHECK5-NEXT: store i64 [[TMP63]], i64* [[TMP125]], align 8 6795 // CHECK5-NEXT: [[TMP126:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 9 6796 // CHECK5-NEXT: store i64 4, i64* [[TMP126]], align 8 6797 // CHECK5-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 9 6798 // CHECK5-NEXT: store i8* null, i8** [[TMP127]], align 8 6799 // CHECK5-NEXT: [[TMP128:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 6800 // CHECK5-NEXT: [[TMP129:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 6801 // CHECK5-NEXT: [[TMP130:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 6802 // CHECK5-NEXT: [[TMP131:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140.region_id, i32 10, i8** [[TMP128]], i8** [[TMP129]], i64* [[TMP130]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) 6803 // CHECK5-NEXT: [[TMP132:%.*]] = icmp ne i32 [[TMP131]], 0 6804 // CHECK5-NEXT: br i1 [[TMP132]], label [[OMP_OFFLOAD_FAILED23:%.*]], label [[OMP_OFFLOAD_CONT24:%.*]] 6805 // CHECK5: omp_offload.failed23: 6806 // CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i64 [[TMP61]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP63]]) #[[ATTR4]] 6807 // CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT24]] 6808 // CHECK5: omp_offload.cont24: 6809 // CHECK5-NEXT: br label [[OMP_IF_END26:%.*]] 6810 // CHECK5: omp_if.else25: 6811 // CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i64 [[TMP61]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP63]]) #[[ATTR4]] 6812 // CHECK5-NEXT: br label [[OMP_IF_END26]] 6813 // CHECK5: omp_if.end26: 6814 // CHECK5-NEXT: [[TMP133:%.*]] = load i32, i32* [[A]], align 4 6815 // CHECK5-NEXT: [[TMP134:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 6816 // CHECK5-NEXT: call void @llvm.stackrestore(i8* [[TMP134]]) 6817 // CHECK5-NEXT: ret i32 [[TMP133]] 6818 // 6819 // 6820 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96 6821 // CHECK5-SAME: () #[[ATTR2:[0-9]+]] { 6822 // CHECK5-NEXT: entry: 6823 // CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*)) 6824 // CHECK5-NEXT: ret void 6825 // 6826 // 6827 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined. 6828 // CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR3:[0-9]+]] { 6829 // CHECK5-NEXT: entry: 6830 // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 6831 // CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 6832 // CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 6833 // CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 6834 // CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 6835 // CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 6836 // CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 6837 // CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 6838 // CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 6839 // CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 6840 // CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 6841 // CHECK5-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 6842 // CHECK5-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4 6843 // CHECK5-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 6844 // CHECK5-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 6845 // CHECK5-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 6846 // CHECK5-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 6847 // CHECK5-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 6848 // CHECK5-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 6849 // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5 6850 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 6851 // CHECK5: cond.true: 6852 // CHECK5-NEXT: br label [[COND_END:%.*]] 6853 // CHECK5: cond.false: 6854 // CHECK5-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 6855 // CHECK5-NEXT: br label [[COND_END]] 6856 // CHECK5: cond.end: 6857 // CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 6858 // CHECK5-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 6859 // CHECK5-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 6860 // CHECK5-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 6861 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 6862 // CHECK5: omp.inner.for.cond: 6863 // CHECK5-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10 6864 // CHECK5-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !10 6865 // CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 6866 // CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 6867 // CHECK5: omp.inner.for.body: 6868 // CHECK5-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10 6869 // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5 6870 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]] 6871 // CHECK5-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !10 6872 // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 6873 // CHECK5: omp.body.continue: 6874 // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 6875 // CHECK5: omp.inner.for.inc: 6876 // CHECK5-NEXT: [[TMP8:%.*]] = 
load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10 6877 // CHECK5-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 6878 // CHECK5-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10 6879 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] 6880 // CHECK5: omp.inner.for.end: 6881 // CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 6882 // CHECK5: omp.loop.exit: 6883 // CHECK5-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 6884 // CHECK5-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 6885 // CHECK5-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0 6886 // CHECK5-NEXT: br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 6887 // CHECK5: .omp.final.then: 6888 // CHECK5-NEXT: store i32 33, i32* [[I]], align 4 6889 // CHECK5-NEXT: br label [[DOTOMP_FINAL_DONE]] 6890 // CHECK5: .omp.final.done: 6891 // CHECK5-NEXT: ret void 6892 // 6893 // 6894 // CHECK5-LABEL: define {{[^@]+}}@.omp_task_entry. 6895 // CHECK5-SAME: (i32 signext [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias [[TMP1:%.*]]) #[[ATTR5:[0-9]+]] { 6896 // CHECK5-NEXT: entry: 6897 // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4 6898 // CHECK5-NEXT: [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 8 6899 // CHECK5-NEXT: [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 8 6900 // CHECK5-NEXT: [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 8 6901 // CHECK5-NEXT: [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 8 6902 // CHECK5-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 8 6903 // CHECK5-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4 6904 // CHECK5-NEXT: [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 8 6905 // CHECK5-NEXT: store i32 [[TMP0]], i32* [[DOTADDR]], align 4 6906 // CHECK5-NEXT: store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8 6907 // CHECK5-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4 6908 // CHECK5-NEXT: [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8 6909 // CHECK5-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0 6910 // CHECK5-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2 6911 // CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0 6912 // CHECK5-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8 6913 // CHECK5-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon* 6914 // CHECK5-NEXT: [[TMP9:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8* 6915 // CHECK5-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META16:![0-9]+]]) 6916 // CHECK5-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META19:![0-9]+]]) 6917 // CHECK5-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]]) 6918 // CHECK5-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META23:![0-9]+]]) 6919 // CHECK5-NEXT: store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !25 6920 // CHECK5-NEXT: store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 8, !noalias !25 6921 // CHECK5-NEXT: store i8* null, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !25 6922 // CHECK5-NEXT: store void (i8*, ...)* null, void 
(i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !25 6923 // CHECK5-NEXT: store i8* [[TMP9]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !25 6924 // CHECK5-NEXT: store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !25 6925 // CHECK5-NEXT: [[TMP10:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !25 6926 // CHECK5-NEXT: [[TMP11:%.*]] = call i32 @__tgt_target_teams_nowait_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 1, i32 0, i32 0, i8* null, i32 0, i8* null) #[[ATTR4]] 6927 // CHECK5-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 6928 // CHECK5-NEXT: br i1 [[TMP12]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]] 6929 // CHECK5: omp_offload.failed.i: 6930 // CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96() #[[ATTR4]] 6931 // CHECK5-NEXT: br label [[DOTOMP_OUTLINED__1_EXIT]] 6932 // CHECK5: .omp_outlined..1.exit: 6933 // CHECK5-NEXT: ret i32 0 6934 // 6935 // 6936 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101 6937 // CHECK5-SAME: (i64 [[A:%.*]], i64 [[K:%.*]]) #[[ATTR3]] { 6938 // CHECK5-NEXT: entry: 6939 // CHECK5-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 6940 // CHECK5-NEXT: [[K_ADDR:%.*]] = alloca i64, align 8 6941 // CHECK5-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 6942 // CHECK5-NEXT: [[K_CASTED:%.*]] = alloca i64, align 8 6943 // CHECK5-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 6944 // CHECK5-NEXT: store i64 [[K]], i64* [[K_ADDR]], align 8 6945 // CHECK5-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 6946 // CHECK5-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 8 6947 // CHECK5-NEXT: [[CONV1:%.*]] = bitcast i64* [[A_CASTED]] to i32* 6948 // CHECK5-NEXT: store i32 [[TMP0]], i32* [[CONV1]], align 4 6949 // CHECK5-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8 6950 // CHECK5-NEXT: [[TMP2:%.*]] = load i64, i64* [[K_ADDR]], align 8 6951 // CHECK5-NEXT: store i64 [[TMP2]], i64* [[K_CASTED]], align 8 6952 // CHECK5-NEXT: [[TMP3:%.*]] = load i64, i64* [[K_CASTED]], align 8 6953 // CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]]) 6954 // CHECK5-NEXT: ret void 6955 // 6956 // 6957 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..2 6958 // CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[K:%.*]]) #[[ATTR3]] { 6959 // CHECK5-NEXT: entry: 6960 // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 6961 // CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 6962 // CHECK5-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 6963 // CHECK5-NEXT: [[K_ADDR:%.*]] = alloca i64, align 8 6964 // CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 6965 // CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 6966 // CHECK5-NEXT: [[DOTLINEAR_START:%.*]] = alloca i64, align 8 6967 // CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 6968 // CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 6969 // CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 6970 // CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 6971 // CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 6972 // CHECK5-NEXT: [[K1:%.*]] = alloca i64, align 8 6973 // CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 6974 // CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 6975 // CHECK5-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 6976 // CHECK5-NEXT: store i64 [[K]], i64* [[K_ADDR]], align 8 6977 // CHECK5-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 6978 // CHECK5-NEXT: [[TMP0:%.*]] = load i64, i64* [[K_ADDR]], align 8 6979 // CHECK5-NEXT: store i64 [[TMP0]], i64* [[DOTLINEAR_START]], align 8 6980 // CHECK5-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 6981 // CHECK5-NEXT: store i32 8, i32* [[DOTOMP_UB]], align 4 6982 // CHECK5-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 6983 // CHECK5-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 6984 // CHECK5-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 6985 // CHECK5-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 6986 // CHECK5-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP2]]) 6987 // CHECK5-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32 1073741859, i32 0, i32 8, i32 1, i32 1) 6988 // CHECK5-NEXT: br label [[OMP_DISPATCH_COND:%.*]] 6989 // CHECK5: omp.dispatch.cond: 6990 // CHECK5-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]]) 6991 // CHECK5-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP3]], 0 6992 // CHECK5-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] 6993 // CHECK5: omp.dispatch.body: 6994 // CHECK5-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 6995 // CHECK5-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 6996 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 6997 // CHECK5: omp.inner.for.cond: 6998 // CHECK5-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26 6999 // CHECK5-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !26 7000 // CHECK5-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 7001 // CHECK5-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 7002 // CHECK5: 
omp.inner.for.body:
// CHECK5-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
// CHECK5-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK5-NEXT:    [[SUB:%.*]] = sub nsw i32 10, [[MUL]]
// CHECK5-NEXT:    store i32 [[SUB]], i32* [[I]], align 4, !llvm.access.group !26
// CHECK5-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !26
// CHECK5-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
// CHECK5-NEXT:    [[MUL2:%.*]] = mul nsw i32 [[TMP9]], 3
// CHECK5-NEXT:    [[CONV3:%.*]] = sext i32 [[MUL2]] to i64
// CHECK5-NEXT:    [[ADD:%.*]] = add nsw i64 [[TMP8]], [[CONV3]]
// CHECK5-NEXT:    store i64 [[ADD]], i64* [[K1]], align 8, !llvm.access.group !26
// CHECK5-NEXT:    [[TMP10:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !26
// CHECK5-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK5-NEXT:    store i32 [[ADD4]], i32* [[CONV]], align 8, !llvm.access.group !26
// CHECK5-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK5:       omp.body.continue:
// CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK5:       omp.inner.for.inc:
// CHECK5-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
// CHECK5-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK5-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
// CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]]
// CHECK5:       omp.inner.for.end:
// CHECK5-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
// CHECK5:       omp.dispatch.inc:
// CHECK5-NEXT:    br label [[OMP_DISPATCH_COND]]
// CHECK5:       omp.dispatch.end:
// CHECK5-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK5-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
// CHECK5-NEXT:    br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK5:       .omp.final.then:
// CHECK5-NEXT:    store i32 1, i32* [[I]], align 4
// CHECK5-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK5:       .omp.final.done:
// CHECK5-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK5-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
// CHECK5-NEXT:    br i1 [[TMP15]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
// CHECK5:       .omp.linear.pu:
// CHECK5-NEXT:    [[TMP16:%.*]] = load i64, i64* [[K1]], align 8
// CHECK5-NEXT:    store i64 [[TMP16]], i64* [[K_ADDR]], align 8
// CHECK5-NEXT:    br label [[DOTOMP_LINEAR_PU_DONE]]
// CHECK5:       .omp.linear.pu.done:
// CHECK5-NEXT:    ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108
// CHECK5-SAME: (i64 [[AA:%.*]], i64 [[LIN:%.*]], i64 [[A:%.*]]) #[[ATTR2]] {
// CHECK5-NEXT:  entry:
// CHECK5-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[LIN_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[LIN_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK5-NEXT:    store i64 [[LIN]], i64* [[LIN_ADDR]], align 8
// CHECK5-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK5-NEXT:    [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32*
// CHECK5-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK5-NEXT:    [[TMP0:%.*]] = load i16, i16* [[CONV]], align 8
// CHECK5-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK5-NEXT:    store i16 [[TMP0]], i16* [[CONV3]], align 2
// CHECK5-NEXT:    [[TMP1:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK5-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV1]], align 8
// CHECK5-NEXT:    [[CONV4:%.*]] = bitcast i64* [[LIN_CASTED]] to i32*
// CHECK5-NEXT:    store i32 [[TMP2]], i32* [[CONV4]], align 4
// CHECK5-NEXT:    [[TMP3:%.*]] = load i64, i64* [[LIN_CASTED]], align 8
// CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[CONV2]], align 8
// CHECK5-NEXT:    [[CONV5:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK5-NEXT:    store i32 [[TMP4]], i32* [[CONV5]], align 4
// CHECK5-NEXT:    [[TMP5:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]])
// CHECK5-NEXT:    ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[AA:%.*]], i64 [[LIN:%.*]], i64 [[A:%.*]]) #[[ATTR3]] {
// CHECK5-NEXT:  entry:
// CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[LIN_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[TMP:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[DOTLINEAR_START3:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[IT:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[LIN4:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[A5:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK5-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK5-NEXT:    store i64 [[LIN]], i64* [[LIN_ADDR]], align 8
// CHECK5-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK5-NEXT:    [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32*
// CHECK5-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK5-NEXT:    [[TMP0:%.*]] = load i32, i32* [[CONV1]], align 8
// CHECK5-NEXT:    store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4
// CHECK5-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV2]], align 8
// CHECK5-NEXT:    store i32 [[TMP1]], i32* [[DOTLINEAR_START3]], align 4
// CHECK5-NEXT:    [[CALL:%.*]] = call i64 @_Z7get_valv()
// CHECK5-NEXT:    store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8
// CHECK5-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK5-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK5-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK5-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK5-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3]], i32 [[TMP3]])
// CHECK5-NEXT:    call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK5-NEXT:    [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK5-NEXT:    [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3
// CHECK5-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK5:       cond.true:
// CHECK5-NEXT:    br label [[COND_END:%.*]]
// CHECK5:       cond.false:
// CHECK5-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK5-NEXT:    br label [[COND_END]]
// CHECK5:       cond.end:
// CHECK5-NEXT:    [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK5-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK5-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK5-NEXT:    store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8
// CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK5:       omp.inner.for.cond:
// CHECK5-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29
// CHECK5-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !29
// CHECK5-NEXT:    [[CMP6:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]]
// CHECK5-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK5:       omp.inner.for.body:
// CHECK5-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29
// CHECK5-NEXT:    [[MUL:%.*]] = mul i64 [[TMP9]], 400
// CHECK5-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK5-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !29
// CHECK5-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4, !llvm.access.group !29
// CHECK5-NEXT:    [[CONV7:%.*]] = sext i32 [[TMP10]] to i64
// CHECK5-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29
// CHECK5-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !29
// CHECK5-NEXT:    [[MUL8:%.*]] = mul i64 [[TMP11]], [[TMP12]]
// CHECK5-NEXT:    [[ADD:%.*]] = add i64 [[CONV7]], [[MUL8]]
// CHECK5-NEXT:    [[CONV9:%.*]] = trunc i64 [[ADD]] to i32
// CHECK5-NEXT:    store i32 [[CONV9]], i32* [[LIN4]], align 4, !llvm.access.group !29
// CHECK5-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START3]], align 4, !llvm.access.group !29
// CHECK5-NEXT:    [[CONV10:%.*]] = sext i32 [[TMP13]] to i64
// CHECK5-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29
// CHECK5-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !29
// CHECK5-NEXT:    [[MUL11:%.*]] = mul i64 [[TMP14]], [[TMP15]]
// CHECK5-NEXT:    [[ADD12:%.*]] = add i64 [[CONV10]], [[MUL11]]
// CHECK5-NEXT:    [[CONV13:%.*]] = trunc i64 [[ADD12]] to i32
// CHECK5-NEXT:    store i32 [[CONV13]], i32* [[A5]], align 4, !llvm.access.group !29
// CHECK5-NEXT:    [[TMP16:%.*]] = load i16, i16* [[CONV]], align 8, !llvm.access.group !29
// CHECK5-NEXT:    [[CONV14:%.*]] = sext i16 [[TMP16]] to i32
// CHECK5-NEXT:    [[ADD15:%.*]] = add nsw i32 [[CONV14]], 1
// CHECK5-NEXT:    [[CONV16:%.*]] = trunc i32 [[ADD15]] to i16
// CHECK5-NEXT:    store i16 [[CONV16]], i16* [[CONV]], align 8, !llvm.access.group !29
// CHECK5-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK5:       omp.body.continue:
// CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK5:       omp.inner.for.inc:
// CHECK5-NEXT:    [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29
// CHECK5-NEXT:    [[ADD17:%.*]] = add i64 [[TMP17]], 1
// CHECK5-NEXT:    store i64 [[ADD17]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29
// CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]]
// CHECK5:       omp.inner.for.end:
// CHECK5-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK5:       omp.loop.exit:
// CHECK5-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
// CHECK5-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK5-NEXT:    [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
// CHECK5-NEXT:    br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK5:       .omp.final.then:
// CHECK5-NEXT:    store i64 400, i64* [[IT]], align 8
// CHECK5-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK5:       .omp.final.done:
// CHECK5-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK5-NEXT:    [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0
// CHECK5-NEXT:    br i1 [[TMP21]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
// CHECK5:       .omp.linear.pu:
// CHECK5-NEXT:    [[TMP22:%.*]] = load i32, i32* [[LIN4]], align 4
// CHECK5-NEXT:    store i32 [[TMP22]], i32* [[CONV1]], align 8
// CHECK5-NEXT:    [[TMP23:%.*]] = load i32, i32* [[A5]], align 4
// CHECK5-NEXT:    store i32 [[TMP23]], i32* [[CONV2]], align 8
// CHECK5-NEXT:    br label [[DOTOMP_LINEAR_PU_DONE]]
// CHECK5:       .omp.linear.pu.done:
// CHECK5-NEXT:    ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116
// CHECK5-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]]) #[[ATTR2]] {
// CHECK5-NEXT:  entry:
// CHECK5-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK5-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK5-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK5-NEXT:    [[TMP0:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK5-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK5-NEXT:    store i32 [[TMP0]], i32* [[CONV2]], align 4
// CHECK5-NEXT:    [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK5-NEXT:    [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK5-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK5-NEXT:    store i16 [[TMP2]], i16* [[CONV3]], align 2
// CHECK5-NEXT:    [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]])
// CHECK5-NEXT:    ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]]) #[[ATTR3]] {
// CHECK5-NEXT:  entry:
// CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[TMP:%.*]] = alloca i16, align 2
// CHECK5-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[IT:%.*]] = alloca i16, align 2
// CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK5-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK5-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK5-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK5-NEXT:    store i32 3, i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK5-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK5-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK5-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3
// CHECK5-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK5:       cond.true:
// CHECK5-NEXT:    br label [[COND_END:%.*]]
// CHECK5:       cond.false:
// CHECK5-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT:    br label [[COND_END]]
// CHECK5:       cond.end:
// CHECK5-NEXT:    [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK5-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK5-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK5:       omp.inner.for.cond:
// CHECK5-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
// CHECK5-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !32
// CHECK5-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK5-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK5:       omp.inner.for.body:
// CHECK5-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
// CHECK5-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4
// CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 6, [[MUL]]
// CHECK5-NEXT:    [[CONV3:%.*]] = trunc i32 [[ADD]] to i16
// CHECK5-NEXT:    store i16 [[CONV3]], i16* [[IT]], align 2, !llvm.access.group !32
// CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !32
// CHECK5-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK5-NEXT:    store i32 [[ADD4]], i32* [[CONV]], align 8, !llvm.access.group !32
// CHECK5-NEXT:    [[TMP9:%.*]] = load i16, i16* [[CONV1]], align 8, !llvm.access.group !32
// CHECK5-NEXT:    [[CONV5:%.*]] = sext i16 [[TMP9]] to i32
// CHECK5-NEXT:    [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1
// CHECK5-NEXT:    [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16
// CHECK5-NEXT:    store i16 [[CONV7]], i16* [[CONV1]], align 8, !llvm.access.group !32
// CHECK5-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK5:       omp.body.continue:
// CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK5:       omp.inner.for.inc:
// CHECK5-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
// CHECK5-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK5-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
// CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]]
// CHECK5:       omp.inner.for.end:
// CHECK5-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK5:       omp.loop.exit:
// CHECK5-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK5-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK5-NEXT:    [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
// CHECK5-NEXT:    br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK5:       .omp.final.then:
// CHECK5-NEXT:    store i16 22, i16* [[IT]], align 2
// CHECK5-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK5:       .omp.final.done:
// CHECK5-NEXT:    ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140
// CHECK5-SAME: (i64 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK5-NEXT:  entry:
// CHECK5-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
// CHECK5-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 8
// CHECK5-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
// CHECK5-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[VLA_ADDR4:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 8
// CHECK5-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
// CHECK5-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK5-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
// CHECK5-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK5-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 8
// CHECK5-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK5-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK5-NEXT:    store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
// CHECK5-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 8
// CHECK5-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
// CHECK5-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK5-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
// CHECK5-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK5-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
// CHECK5-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK5-NEXT:    [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK5-NEXT:    [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
// CHECK5-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
// CHECK5-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
// CHECK5-NEXT:    [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK5-NEXT:    [[CONV6:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK5-NEXT:    store i32 [[TMP8]], i32* [[CONV6]], align 4
// CHECK5-NEXT:    [[TMP9:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK5-NEXT:    [[TMP10:%.*]] = load i32, i32* [[CONV5]], align 8
// CHECK5-NEXT:    [[CONV7:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK5-NEXT:    store i32 [[TMP10]], i32* [[CONV7]], align 4
// CHECK5-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, [10 x float]*, i64, float*, [5 x [10 x double]]*, i64, i64, double*, %struct.TT*, i64)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP9]], [10 x float]* [[TMP0]], i64 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i64 [[TMP11]])
// CHECK5-NEXT:    ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..7
// CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
// CHECK5-NEXT:  entry:
// CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
// CHECK5-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 8
// CHECK5-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
// CHECK5-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[VLA_ADDR4:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 8
// CHECK5-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
// CHECK5-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[TMP:%.*]] = alloca i8, align 1
// CHECK5-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[IT:%.*]] = alloca i8, align 1
// CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK5-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK5-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
// CHECK5-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK5-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 8
// CHECK5-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK5-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK5-NEXT:    store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
// CHECK5-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 8
// CHECK5-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
// CHECK5-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK5-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
// CHECK5-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK5-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
// CHECK5-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK5-NEXT:    [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK5-NEXT:    [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
// CHECK5-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
// CHECK5-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
// CHECK5-NEXT:    [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK5-NEXT:    store i32 25, i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[CONV5]], align 8
// CHECK5-NEXT:    [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
// CHECK5-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]])
// CHECK5-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
// CHECK5:       omp.dispatch.cond:
// CHECK5-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25
// CHECK5-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK5:       cond.true:
// CHECK5-NEXT:    br label [[COND_END:%.*]]
// CHECK5:       cond.false:
// CHECK5-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT:    br label [[COND_END]]
// CHECK5:       cond.end:
// CHECK5-NEXT:    [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK5-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK5-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
// CHECK5-NEXT:    br i1 [[CMP6]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK5:       omp.dispatch.body:
// CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK5:       omp.inner.for.cond:
// CHECK5-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
// CHECK5-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !35
// CHECK5-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
// CHECK5-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK5:       omp.inner.for.body:
// CHECK5-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
// CHECK5-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
// CHECK5-NEXT:    [[SUB:%.*]] = sub nsw i32 122, [[MUL]]
// CHECK5-NEXT:    [[CONV8:%.*]] = trunc i32 [[SUB]] to i8
// CHECK5-NEXT:    store i8 [[CONV8]], i8* [[IT]], align 1, !llvm.access.group !35
// CHECK5-NEXT:    [[TMP19:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !35
// CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK5-NEXT:    store i32 [[ADD]], i32* [[CONV]], align 8, !llvm.access.group !35
// CHECK5-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 2
// CHECK5-NEXT:    [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !35
// CHECK5-NEXT:    [[CONV9:%.*]] = fpext float [[TMP20]] to double
// CHECK5-NEXT:    [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00
// CHECK5-NEXT:    [[CONV11:%.*]] = fptrunc double [[ADD10]] to float
// CHECK5-NEXT:    store float [[CONV11]], float* [[ARRAYIDX]], align 4, !llvm.access.group !35
// CHECK5-NEXT:    [[ARRAYIDX12:%.*]] = getelementptr inbounds float, float* [[TMP2]], i64 3
// CHECK5-NEXT:    [[TMP21:%.*]] = load float, float* [[ARRAYIDX12]], align 4, !llvm.access.group !35
// CHECK5-NEXT:    [[CONV13:%.*]] = fpext float [[TMP21]] to double
// CHECK5-NEXT:    [[ADD14:%.*]] = fadd double [[CONV13]], 1.000000e+00
// CHECK5-NEXT:    [[CONV15:%.*]] = fptrunc double [[ADD14]] to float
// CHECK5-NEXT:    store float [[CONV15]], float* [[ARRAYIDX12]], align 4, !llvm.access.group !35
// CHECK5-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i64 0, i64 1
// CHECK5-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX16]], i64 0, i64 2
// CHECK5-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX17]], align 8, !llvm.access.group !35
// CHECK5-NEXT:    [[ADD18:%.*]] = fadd double [[TMP22]], 1.000000e+00
// CHECK5-NEXT:    store double [[ADD18]], double* [[ARRAYIDX17]], align 8, !llvm.access.group !35
// CHECK5-NEXT:    [[TMP23:%.*]] = mul nsw i64 1, [[TMP5]]
// CHECK5-NEXT:    [[ARRAYIDX19:%.*]] = getelementptr inbounds double, double* [[TMP6]], i64 [[TMP23]]
// CHECK5-NEXT:    [[ARRAYIDX20:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX19]], i64 3
// CHECK5-NEXT:    [[TMP24:%.*]] = load double, double* [[ARRAYIDX20]], align 8, !llvm.access.group !35
// CHECK5-NEXT:    [[ADD21:%.*]] = fadd double [[TMP24]], 1.000000e+00
// CHECK5-NEXT:    store double [[ADD21]], double* [[ARRAYIDX20]], align 8, !llvm.access.group !35
// CHECK5-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
// CHECK5-NEXT:    [[TMP25:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group !35
// CHECK5-NEXT:    [[ADD22:%.*]] = add nsw i64 [[TMP25]], 1
// CHECK5-NEXT:    store i64 [[ADD22]], i64* [[X]], align 8, !llvm.access.group !35
// CHECK5-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
// CHECK5-NEXT:    [[TMP26:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group !35
// CHECK5-NEXT:    [[CONV23:%.*]] = sext i8 [[TMP26]] to i32
// CHECK5-NEXT:    [[ADD24:%.*]] = add nsw i32 [[CONV23]], 1
// CHECK5-NEXT:    [[CONV25:%.*]] = trunc i32 [[ADD24]] to i8
// CHECK5-NEXT:    store i8 [[CONV25]], i8* [[Y]], align 8, !llvm.access.group !35
// CHECK5-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK5:       omp.body.continue:
// CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK5:       omp.inner.for.inc:
// CHECK5-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
// CHECK5-NEXT:    [[ADD26:%.*]] = add nsw i32 [[TMP27]], 1
// CHECK5-NEXT:    store i32 [[ADD26]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
// CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]]
// CHECK5:       omp.inner.for.end:
// CHECK5-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
// CHECK5:       omp.dispatch.inc:
// CHECK5-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK5-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK5-NEXT:    [[ADD27:%.*]] = add nsw i32 [[TMP28]], [[TMP29]]
// CHECK5-NEXT:    store i32 [[ADD27]], i32* [[DOTOMP_LB]], align 4
// CHECK5-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK5-NEXT:    [[ADD28:%.*]] = add nsw i32 [[TMP30]], [[TMP31]]
// CHECK5-NEXT:    store i32 [[ADD28]], i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT:    br label [[OMP_DISPATCH_COND]]
// CHECK5:       omp.dispatch.end:
// CHECK5-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]])
// CHECK5-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK5-NEXT:    [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0
// CHECK5-NEXT:    br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK5:       .omp.final.then:
// CHECK5-NEXT:    store i8 96, i8* [[IT]], align 1
// CHECK5-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK5:       .omp.final.done:
// CHECK5-NEXT:    ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@_Z3bari
// CHECK5-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] {
// CHECK5-NEXT:  entry:
// CHECK5-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[A:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8
// CHECK5-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK5-NEXT:    store i32 0, i32* [[A]], align 4
// CHECK5-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK5-NEXT:    [[CALL:%.*]] = call signext i32 @_Z3fooi(i32 signext [[TMP0]])
// CHECK5-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
// CHECK5-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
// CHECK5-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK5-NEXT:    [[CALL1:%.*]] = call signext i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 8 dereferenceable(8) [[S]], i32 signext [[TMP2]])
// CHECK5-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK5-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
// CHECK5-NEXT:    store i32 [[ADD2]], i32* [[A]], align 4
// CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK5-NEXT:    [[CALL3:%.*]] = call signext i32 @_ZL7fstatici(i32 signext [[TMP4]])
// CHECK5-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
// CHECK5-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
// CHECK5-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4
// CHECK5-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK5-NEXT:    [[CALL5:%.*]] = call signext i32 @_Z9ftemplateIiET_i(i32 signext [[TMP6]])
// CHECK5-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK5-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
// CHECK5-NEXT:    store i32 [[ADD6]], i32* [[A]], align 4
// CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK5-NEXT:    ret i32 [[TMP8]]
//
//
// CHECK5-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
// CHECK5-SAME: (%struct.S1* nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 signext [[N:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK5-NEXT:  entry:
// CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK5-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[B:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK5-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
// CHECK5-NEXT:    [[B_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [6 x i8*], align 8
// CHECK5-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [6 x i8*], align 8
// CHECK5-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [6 x i8*], align 8
// CHECK5-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [6 x i64], align 8
// CHECK5-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK5-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK5-NEXT:    [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK5-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK5-NEXT:    store i32 [[ADD]], i32* [[B]], align 4
// CHECK5-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK5-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK5-NEXT:    [[TMP3:%.*]] = call i8* @llvm.stacksave()
// CHECK5-NEXT:    store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
// CHECK5-NEXT:    [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]]
// CHECK5-NEXT:    [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2
// CHECK5-NEXT:    store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
// CHECK5-NEXT:    [[TMP5:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK5-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 60
// CHECK5-NEXT:    [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8
// CHECK5-NEXT:    store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK5-NEXT:    [[TMP6:%.*]] = load i32, i32* [[B]], align 4
// CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[B_CASTED]] to i32*
// CHECK5-NEXT:    store i32 [[TMP6]], i32* [[CONV]], align 4
// CHECK5-NEXT:    [[TMP7:%.*]] = load i64, i64* [[B_CASTED]], align 8
// CHECK5-NEXT:    [[TMP8:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK5-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP8]] to i1
// CHECK5-NEXT:    [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i8*
// CHECK5-NEXT:    [[FROMBOOL3:%.*]] = zext i1 [[TOBOOL]] to i8
// CHECK5-NEXT:    store i8 [[FROMBOOL3]], i8* [[CONV2]], align 1
// CHECK5-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK5-NEXT:    [[TMP10:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK5-NEXT:    [[TOBOOL4:%.*]] = trunc i8 [[TMP10]] to i1
// CHECK5-NEXT:    br i1 [[TOBOOL4]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK5:       omp_if.then:
// CHECK5-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK5-NEXT:    [[TMP11:%.*]] = mul nuw i64 2, [[TMP2]]
// CHECK5-NEXT:    [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 2
// CHECK5-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK5-NEXT:    [[TMP14:%.*]] = bitcast i8** [[TMP13]] to %struct.S1**
// CHECK5-NEXT:    store %struct.S1* [[THIS1]], %struct.S1** [[TMP14]], align 8
// CHECK5-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK5-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to double**
// CHECK5-NEXT:    store double* [[A]], double** [[TMP16]], align 8
// CHECK5-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK5-NEXT:    store i64 8, i64* [[TMP17]], align 8
// CHECK5-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK5-NEXT:    store i8* null, i8** [[TMP18]], align 8
// CHECK5-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK5-NEXT:    [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64*
// CHECK5-NEXT:    store i64 [[TMP7]], i64* [[TMP20]], align 8
// CHECK5-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK5-NEXT:    [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i64*
// CHECK5-NEXT:    store i64 [[TMP7]], i64* [[TMP22]], align 8
// CHECK5-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
// CHECK5-NEXT:    store i64 4, i64* [[TMP23]], align 8
// CHECK5-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK5-NEXT:    store i8* null, i8** [[TMP24]], align 8
// CHECK5-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK5-NEXT:    [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i64*
// CHECK5-NEXT:    store i64 2, i64* [[TMP26]], align 8
// CHECK5-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK5-NEXT:    [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64*
// CHECK5-NEXT:    store i64 2, i64* [[TMP28]], align 8
// CHECK5-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
// CHECK5-NEXT:    store i64 8, i64* [[TMP29]], align 8
// CHECK5-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK5-NEXT:    store i8* null, i8** [[TMP30]], align 8
// CHECK5-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK5-NEXT:    [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i64*
// CHECK5-NEXT:    store i64 [[TMP2]], i64* [[TMP32]], align 8
// CHECK5-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK5-NEXT:    [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i64*
// CHECK5-NEXT:    store i64 [[TMP2]], i64* [[TMP34]], align 8
// CHECK5-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
// CHECK5-NEXT:    store i64 8, i64* [[TMP35]], align 8
// CHECK5-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
// CHECK5-NEXT:    store i8* null, i8** [[TMP36]], align 8
// CHECK5-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
// CHECK5-NEXT:    [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i16**
// CHECK5-NEXT:    store i16* [[VLA]], i16** [[TMP38]], align 8
// CHECK5-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
// CHECK5-NEXT:    [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i16**
// CHECK5-NEXT:    store i16* [[VLA]], i16** [[TMP40]], align 8
// CHECK5-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
// CHECK5-NEXT:    store i64 [[TMP12]], i64* [[TMP41]], align 8
// CHECK5-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4
// CHECK5-NEXT:    store i8* null, i8** [[TMP42]], align 8
// CHECK5-NEXT:    [[TMP43:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5
// CHECK5-NEXT:    [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i64*
// CHECK5-NEXT:    store i64 [[TMP9]], i64* [[TMP44]], align 8
// CHECK5-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5
// CHECK5-NEXT:    [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i64*
// CHECK5-NEXT:    store i64 [[TMP9]], i64* [[TMP46]], align 8
// CHECK5-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5
// CHECK5-NEXT:    store i64 1, i64* [[TMP47]], align 8
// CHECK5-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 5
// CHECK5-NEXT:    store i8* null, i8** [[TMP48]], align 8
// CHECK5-NEXT:    [[TMP49:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK5-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK5-NEXT:    [[TMP51:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK5-NEXT:    [[TMP52:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK5-NEXT:    [[TOBOOL5:%.*]] = trunc i8 [[TMP52]] to i1
// CHECK5-NEXT:    [[TMP53:%.*]] = select i1 [[TOBOOL5]], i32 0, i32 1
// CHECK5-NEXT:    [[TMP54:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214.region_id, i32 6, i8** [[TMP49]], i8** [[TMP50]], i64* [[TMP51]], i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 1, i32 [[TMP53]])
// CHECK5-NEXT:    [[TMP55:%.*]] = icmp ne i32 [[TMP54]], 0
// CHECK5-NEXT:    br i1 [[TMP55]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK5:       omp_offload.failed:
// CHECK5-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214(%struct.S1* [[THIS1]], i64 [[TMP7]], i64 2, i64 [[TMP2]], i16* [[VLA]], i64 [[TMP9]]) #[[ATTR4]]
// CHECK5-NEXT:    br label [[OMP_OFFLOAD_CONT]]
// CHECK5:       omp_offload.cont:
// CHECK5-NEXT:    br label [[OMP_IF_END:%.*]]
// CHECK5:       omp_if.else:
// CHECK5-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214(%struct.S1* [[THIS1]], i64 [[TMP7]], i64 2, i64 [[TMP2]], i16* [[VLA]], i64 [[TMP9]]) #[[ATTR4]]
// CHECK5-NEXT:    br label [[OMP_IF_END]]
// CHECK5:       omp_if.end:
// CHECK5-NEXT:    [[TMP56:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK5-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP56]]
// CHECK5-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
// CHECK5-NEXT:    [[TMP57:%.*]] = load i16, i16* [[ARRAYIDX6]], align 2
// CHECK5-NEXT:    [[CONV7:%.*]] = sext i16 [[TMP57]] to i32
// CHECK5-NEXT:    [[TMP58:%.*]] = load i32, i32* [[B]], align 4
// CHECK5-NEXT:    [[ADD8:%.*]] = add nsw i32 [[CONV7]], [[TMP58]]
// CHECK5-NEXT:    [[TMP59:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK5-NEXT:    call void @llvm.stackrestore(i8* [[TMP59]])
// CHECK5-NEXT:    ret i32 [[ADD8]]
//
//
// CHECK5-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK5-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] {
// CHECK5-NEXT:  entry:
// CHECK5-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[A:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[AA:%.*]] = alloca i16, align 2
// CHECK5-NEXT:    [[AAA:%.*]] = alloca i8, align 1
// CHECK5-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
// CHECK5-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[AAA_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 8
// CHECK5-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 8
// CHECK5-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 8
// CHECK5-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK5-NEXT:    store i32 0, i32* [[A]], align 4
// CHECK5-NEXT:    store i16 0, i16* [[AA]], align 2
// CHECK5-NEXT:    store i8 0, i8* [[AAA]], align 1
// CHECK5-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK5-NEXT:    store i32 [[TMP0]], i32* [[CONV]], align 4
// CHECK5-NEXT:    [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK5-NEXT:    [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
// CHECK5-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK5-NEXT:    store i16 [[TMP2]], i16* [[CONV1]], align 2
// CHECK5-NEXT:    [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK5-NEXT:    [[TMP4:%.*]] = load i8, i8* [[AAA]], align 1
// CHECK5-NEXT:    [[CONV2:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
// CHECK5-NEXT:    store i8 [[TMP4]], i8* [[CONV2]], align 1
// CHECK5-NEXT:    [[TMP5:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
// CHECK5-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK5-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 50
// CHECK5-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK5:       omp_if.then:
// CHECK5-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK5-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64*
// CHECK5-NEXT:    store i64 [[TMP1]], i64* [[TMP8]], align 8
// CHECK5-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK5-NEXT:    [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i64*
// CHECK5-NEXT:    store i64 [[TMP1]], i64* [[TMP10]], align 8
// CHECK5-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK5-NEXT:    store i8* null, i8** [[TMP11]], align 8
// CHECK5-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK5-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64*
// CHECK5-NEXT:    store i64 [[TMP3]], i64* [[TMP13]], align 8
// CHECK5-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK5-NEXT:    [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64*
// CHECK5-NEXT:    store i64 [[TMP3]], i64* [[TMP15]], align 8
// CHECK5-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK5-NEXT:    store i8* null, i8** [[TMP16]], align 8
// CHECK5-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK5-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64*
// CHECK5-NEXT:    store i64 [[TMP5]], i64* [[TMP18]], align 8
// CHECK5-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK5-NEXT:    [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64*
// CHECK5-NEXT:    store i64 [[TMP5]], i64* [[TMP20]], align 8
// CHECK5-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK5-NEXT:    store i8* null, i8** [[TMP21]], align 8
// CHECK5-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK5-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to [10 x i32]**
// CHECK5-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP23]], align 8
// CHECK5-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK5-NEXT:    [[TMP25:%.*]] = bitcast i8** [[TMP24]] to [10 x i32]**
// CHECK5-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP25]], align 8
// CHECK5-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
// CHECK5-NEXT:    store i8* null, i8** [[TMP26]], align 8
// CHECK5-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK5-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK5-NEXT:    [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK5-NEXT:    [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
// CHECK5-NEXT:    br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK5:       omp_offload.failed:
// CHECK5-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK5-NEXT:    br label [[OMP_OFFLOAD_CONT]]
// CHECK5:       omp_offload.cont:
// CHECK5-NEXT:    br label [[OMP_IF_END:%.*]]
// CHECK5:       omp_if.else:
// CHECK5-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK5-NEXT:    br label [[OMP_IF_END]]
// CHECK5:       omp_if.end:
// CHECK5-NEXT:    [[TMP31:%.*]] = load i32, i32* [[A]], align 4
// CHECK5-NEXT:    ret i32 [[TMP31]]
//
//
// CHECK5-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK5-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK5-NEXT:  entry:
// CHECK5-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[A:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[AA:%.*]] = alloca i16, align 2
// CHECK5-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
// CHECK5-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK5-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK5-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
// CHECK5-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK5-NEXT:    store i32 0, i32* [[A]], align 4
// CHECK5-NEXT:    store i16 0, i16* [[AA]], align 2
// CHECK5-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK5-NEXT:    store i32 [[TMP0]], i32* [[CONV]], align 4
// CHECK5-NEXT:    [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK5-NEXT:    [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
// CHECK5-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK5-NEXT:    store i16 [[TMP2]], i16* [[CONV1]], align 2
// CHECK5-NEXT:    [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK5-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40
// CHECK5-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK5:       omp_if.then:
// CHECK5-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK5-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i64*
// CHECK5-NEXT:    store i64 [[TMP1]], i64* [[TMP6]], align 8
// CHECK5-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK5-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64*
// CHECK5-NEXT:    store i64 [[TMP1]], i64* [[TMP8]], align 8
// CHECK5-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK5-NEXT:    store i8* null, i8** [[TMP9]], align 8
// CHECK5-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK5-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i64*
// CHECK5-NEXT:    store i64 [[TMP3]], i64* [[TMP11]], align 8
// CHECK5-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK5-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64*
// CHECK5-NEXT:    store i64 [[TMP3]], i64* [[TMP13]], align 8
// CHECK5-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK5-NEXT:    store i8* null, i8** [[TMP14]], align 8
// CHECK5-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK5-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to [10 x i32]**
// CHECK5-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP16]], align 8
// CHECK5-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK5-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [10 x i32]**
// CHECK5-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP18]], align 8
// CHECK5-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK5-NEXT:    store i8* null, i8** [[TMP19]], align 8
// CHECK5-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK5-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK5-NEXT:    [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.15, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK5-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
// CHECK5-NEXT:    br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK5:       omp_offload.failed:
// CHECK5-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178(i64 [[TMP1]], i64 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK5-NEXT:    br label [[OMP_OFFLOAD_CONT]]
// CHECK5:       omp_offload.cont:
// CHECK5-NEXT:    br label [[OMP_IF_END:%.*]]
// CHECK5:       omp_if.else:
// CHECK5-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178(i64 [[TMP1]], i64 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK5-NEXT:    br label [[OMP_IF_END]]
// CHECK5:       omp_if.end:
// CHECK5-NEXT:    [[TMP24:%.*]] = load i32, i32* [[A]], align 4
// CHECK5-NEXT:    ret i32 [[TMP24]]
//
//
// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214
// CHECK5-SAME: (%struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK5-NEXT:  entry:
// CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK5-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 8
// CHECK5-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[B_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
// CHECK5-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK5-NEXT:    store i64 [[B]], i64* [[B_ADDR]], align 8
// CHECK5-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK5-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK5-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 8
// CHECK5-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK5-NEXT:    [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
// CHECK5-NEXT:    [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK5-NEXT:    [[TMP3:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK5-NEXT:    [[TMP4:%.*]] = load i16*, i16** [[C_ADDR]], align 8
// CHECK5-NEXT:    [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i8*
// CHECK5-NEXT:    [[TMP5:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK5-NEXT:    [[CONV4:%.*]] = bitcast i64* [[B_CASTED]] to i32*
// CHECK5-NEXT:    store i32 [[TMP5]], i32* [[CONV4]], align 4
// CHECK5-NEXT:    [[TMP6:%.*]] = load i64, i64* [[B_CASTED]], align 8
// CHECK5-NEXT:    [[TMP7:%.*]] = load i8, i8* [[CONV3]], align 8
// CHECK5-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP7]] to i1
// CHECK5-NEXT:    [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i8*
// CHECK5-NEXT:    [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8
// CHECK5-NEXT:    store i8 [[FROMBOOL]], i8* [[CONV5]], align 1
// CHECK5-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK5-NEXT:    [[TMP9:%.*]] = load i8, i8* [[CONV3]], align 8
// CHECK5-NEXT:    [[TOBOOL6:%.*]] = trunc i8 [[TMP9]] to i1
// CHECK5-NEXT:    br i1 [[TOBOOL6]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK5:       omp_if.then:
// CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*, i64)* @.omp_outlined..9 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]], i64 [[TMP6]], i64 [[TMP2]], i64 [[TMP3]], i16* [[TMP4]], i64 [[TMP8]])
// CHECK5-NEXT:    br label [[OMP_IF_END:%.*]]
// CHECK5:       omp_if.else:
// CHECK5-NEXT:    call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
// CHECK5-NEXT:    store i32 [[TMP0]], i32* [[DOTTHREADID_TEMP_]], align 4
// CHECK5-NEXT:    store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4
// CHECK5-NEXT:    call void @.omp_outlined..9(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTBOUND_ZERO_ADDR]], %struct.S1* [[TMP1]], i64 [[TMP6]], i64 [[TMP2]], i64 [[TMP3]], i16* [[TMP4]], i64 [[TMP8]]) #[[ATTR4]]
// CHECK5-NEXT:    call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
// CHECK5-NEXT:    br label [[OMP_IF_END]]
// CHECK5:       omp_if.end:
// CHECK5-NEXT:    ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..9
// CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
// CHECK5-NEXT:  entry:
// CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK5-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 8
// CHECK5-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[TMP:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[IT:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK5-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK5-NEXT:    store i64 [[B]], i64* [[B_ADDR]], align 8
// CHECK5-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK5-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK5-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 8
// CHECK5-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK5-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
// CHECK5-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK5-NEXT:    [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK5-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
// CHECK5-NEXT:    [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i8*
// CHECK5-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK5-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK5-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK5-NEXT:    [[TMP4:%.*]] = load i8, i8* [[CONV3]], align 8
// CHECK5-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP4]] to i1
// CHECK5-NEXT:    br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK5:       omp_if.then:
// CHECK5-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK5-NEXT:    call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK5-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK5-NEXT:    [[CMP:%.*]] = icmp ugt i64 [[TMP7]], 3
// CHECK5-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK5:       cond.true:
// CHECK5-NEXT:    br label [[COND_END:%.*]]
// CHECK5:       cond.false:
// CHECK5-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK5-NEXT:    br label [[COND_END]]
// CHECK5:       cond.end:
// CHECK5-NEXT:    [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
// CHECK5-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK5-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK5-NEXT:    store i64 [[TMP9]], i64* [[DOTOMP_IV]], align 8
// CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK5:       omp.inner.for.cond:
// CHECK5-NEXT:    [[TMP10:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !38
// CHECK5-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !38
// CHECK5-NEXT:    [[CMP4:%.*]] = icmp ule i64 [[TMP10]], [[TMP11]]
// CHECK5-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK5:       omp.inner.for.body:
// CHECK5-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !38
// CHECK5-NEXT:    [[MUL:%.*]] = mul i64 [[TMP12]], 400
// CHECK5-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK5-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !38
// CHECK5-NEXT:    [[TMP13:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !38
// CHECK5-NEXT:    [[CONV5:%.*]] = sitofp i32 [[TMP13]] to double
// CHECK5-NEXT:    [[ADD:%.*]] = fadd double [[CONV5]], 1.500000e+00
// CHECK5-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK5-NEXT:    store double [[ADD]], double* [[A]], align 8, !nontemporal !39, !llvm.access.group !38
// CHECK5-NEXT:    [[A6:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK5-NEXT:    [[TMP14:%.*]] = load double, double* [[A6]], align 8, !nontemporal !39, !llvm.access.group !38
// CHECK5-NEXT:    [[INC:%.*]] = fadd double [[TMP14]], 1.000000e+00
// CHECK5-NEXT:    store double [[INC]], double* [[A6]], align 8, !nontemporal !39, !llvm.access.group !38
// CHECK5-NEXT:    [[CONV7:%.*]] = fptosi double [[INC]] to i16
// CHECK5-NEXT:    [[TMP15:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK5-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP15]]
// CHECK5-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
// CHECK5-NEXT:    store i16 [[CONV7]], i16* [[ARRAYIDX8]], align 2, !llvm.access.group !38
// CHECK5-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK5:       omp.body.continue:
// CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK5:       omp.inner.for.inc:
// CHECK5-NEXT:    [[TMP16:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !38
// CHECK5-NEXT:    [[ADD9:%.*]] = add i64 [[TMP16]], 1
// CHECK5-NEXT:    store i64 [[ADD9]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !38
// CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP40:![0-9]+]]
// CHECK5:       omp.inner.for.end:
// CHECK5-NEXT:    br label [[OMP_IF_END:%.*]]
// CHECK5:       omp_if.else:
// CHECK5-NEXT:    [[TMP17:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4
// CHECK5-NEXT:    call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP18]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK5-NEXT:    [[TMP19:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK5-NEXT:    [[CMP10:%.*]] = icmp ugt i64 [[TMP19]], 3
// CHECK5-NEXT:    br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
// CHECK5:       cond.true11:
// CHECK5-NEXT:    br label [[COND_END13:%.*]]
// CHECK5:       cond.false12:
// CHECK5-NEXT:    [[TMP20:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK5-NEXT:    br label [[COND_END13]]
// CHECK5:       cond.end13:
// CHECK5-NEXT:    [[COND14:%.*]] = phi i64 [ 3, [[COND_TRUE11]] ], [ [[TMP20]], [[COND_FALSE12]] ]
// CHECK5-NEXT:    store i64 [[COND14]], i64* [[DOTOMP_UB]], align 8
// CHECK5-NEXT:    [[TMP21:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK5-NEXT:    store i64 [[TMP21]], i64* [[DOTOMP_IV]], align 8
// CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND15:%.*]]
// CHECK5:       omp.inner.for.cond15:
// CHECK5-NEXT:    [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK5-NEXT:    [[TMP23:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK5-NEXT:    [[CMP16:%.*]] = icmp ule i64 [[TMP22]], [[TMP23]]
// CHECK5-NEXT:    br i1 [[CMP16]], label [[OMP_INNER_FOR_BODY17:%.*]], label [[OMP_INNER_FOR_END31:%.*]]
// CHECK5:       omp.inner.for.body17:
// CHECK5-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK5-NEXT:    [[MUL18:%.*]] = mul i64 [[TMP24]], 400
// CHECK5-NEXT:    [[SUB19:%.*]] = sub i64 2000, [[MUL18]]
// CHECK5-NEXT:    store i64 [[SUB19]], i64* [[IT]], align 8
// CHECK5-NEXT:    [[TMP25:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK5-NEXT:    [[CONV20:%.*]] = sitofp i32 [[TMP25]] to double
// CHECK5-NEXT:    [[ADD21:%.*]] = fadd double [[CONV20]], 1.500000e+00
//
CHECK5-NEXT: [[A22:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0 8001 // CHECK5-NEXT: store double [[ADD21]], double* [[A22]], align 8 8002 // CHECK5-NEXT: [[A23:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0 8003 // CHECK5-NEXT: [[TMP26:%.*]] = load double, double* [[A23]], align 8 8004 // CHECK5-NEXT: [[INC24:%.*]] = fadd double [[TMP26]], 1.000000e+00 8005 // CHECK5-NEXT: store double [[INC24]], double* [[A23]], align 8 8006 // CHECK5-NEXT: [[CONV25:%.*]] = fptosi double [[INC24]] to i16 8007 // CHECK5-NEXT: [[TMP27:%.*]] = mul nsw i64 1, [[TMP2]] 8008 // CHECK5-NEXT: [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP27]] 8009 // CHECK5-NEXT: [[ARRAYIDX27:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX26]], i64 1 8010 // CHECK5-NEXT: store i16 [[CONV25]], i16* [[ARRAYIDX27]], align 2 8011 // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE28:%.*]] 8012 // CHECK5: omp.body.continue28: 8013 // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC29:%.*]] 8014 // CHECK5: omp.inner.for.inc29: 8015 // CHECK5-NEXT: [[TMP28:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 8016 // CHECK5-NEXT: [[ADD30:%.*]] = add i64 [[TMP28]], 1 8017 // CHECK5-NEXT: store i64 [[ADD30]], i64* [[DOTOMP_IV]], align 8 8018 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND15]], !llvm.loop [[LOOP42:![0-9]+]] 8019 // CHECK5: omp.inner.for.end31: 8020 // CHECK5-NEXT: br label [[OMP_IF_END]] 8021 // CHECK5: omp_if.end: 8022 // CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 8023 // CHECK5: omp.loop.exit: 8024 // CHECK5-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 8025 // CHECK5-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4 8026 // CHECK5-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]]) 8027 // CHECK5-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 8028 // CHECK5-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 8029 // CHECK5-NEXT: br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 8030 // CHECK5: .omp.final.then: 8031 // CHECK5-NEXT: store i64 400, i64* [[IT]], align 8 8032 // CHECK5-NEXT: br label [[DOTOMP_FINAL_DONE]] 8033 // CHECK5: .omp.final.done: 8034 // CHECK5-NEXT: ret void 8035 // 8036 // 8037 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195 8038 // CHECK5-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] { 8039 // CHECK5-NEXT: entry: 8040 // CHECK5-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 8041 // CHECK5-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 8042 // CHECK5-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8 8043 // CHECK5-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8 8044 // CHECK5-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 8045 // CHECK5-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 8046 // CHECK5-NEXT: [[AAA_CASTED:%.*]] = alloca i64, align 8 8047 // CHECK5-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 8048 // CHECK5-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 8049 // CHECK5-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8 8050 // CHECK5-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8 8051 // CHECK5-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 8052 // CHECK5-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 8053 // CHECK5-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8* 8054 // CHECK5-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8 
8055 // CHECK5-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8 8056 // CHECK5-NEXT: [[CONV3:%.*]] = bitcast i64* [[A_CASTED]] to i32* 8057 // CHECK5-NEXT: store i32 [[TMP1]], i32* [[CONV3]], align 4 8058 // CHECK5-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8 8059 // CHECK5-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 8 8060 // CHECK5-NEXT: [[CONV4:%.*]] = bitcast i64* [[AA_CASTED]] to i16* 8061 // CHECK5-NEXT: store i16 [[TMP3]], i16* [[CONV4]], align 2 8062 // CHECK5-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8 8063 // CHECK5-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV2]], align 8 8064 // CHECK5-NEXT: [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] to i8* 8065 // CHECK5-NEXT: store i8 [[TMP5]], i8* [[CONV5]], align 1 8066 // CHECK5-NEXT: [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8 8067 // CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]]) 8068 // CHECK5-NEXT: ret void 8069 // 8070 // 8071 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..11 8072 // CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { 8073 // CHECK5-NEXT: entry: 8074 // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 8075 // CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 8076 // CHECK5-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 8077 // CHECK5-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 8078 // CHECK5-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8 8079 // CHECK5-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8 8080 // CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 8081 // CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 8082 // CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 8083 // CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 8084 // CHECK5-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 8085 // CHECK5-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 8086 // CHECK5-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8 8087 // CHECK5-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8 8088 // CHECK5-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 8089 // CHECK5-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 8090 // CHECK5-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8* 8091 // CHECK5-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8 8092 // CHECK5-NEXT: ret void 8093 // 8094 // 8095 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178 8096 // CHECK5-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] { 8097 // CHECK5-NEXT: entry: 8098 // CHECK5-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 8099 // CHECK5-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 8100 // CHECK5-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8 8101 // CHECK5-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 8102 // CHECK5-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 8103 // CHECK5-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 8104 // CHECK5-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 8105 // CHECK5-NEXT: store [10 x i32]* [[B]], [10 x 
i32]** [[B_ADDR]], align 8 8106 // CHECK5-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 8107 // CHECK5-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 8108 // CHECK5-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8 8109 // CHECK5-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8 8110 // CHECK5-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32* 8111 // CHECK5-NEXT: store i32 [[TMP1]], i32* [[CONV2]], align 4 8112 // CHECK5-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8 8113 // CHECK5-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 8 8114 // CHECK5-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* 8115 // CHECK5-NEXT: store i16 [[TMP3]], i16* [[CONV3]], align 2 8116 // CHECK5-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8 8117 // CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) 8118 // CHECK5-NEXT: ret void 8119 // 8120 // 8121 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..14 8122 // CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { 8123 // CHECK5-NEXT: entry: 8124 // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 8125 // CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 8126 // CHECK5-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 8127 // CHECK5-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 8128 // CHECK5-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8 8129 // CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 8130 // CHECK5-NEXT: [[TMP:%.*]] = alloca i64, align 8 8131 // CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 8132 // CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 8133 // CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 8134 // CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 8135 // CHECK5-NEXT: [[I:%.*]] = alloca i64, align 8 8136 // CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 8137 // CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 8138 // CHECK5-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 8139 // CHECK5-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 8140 // CHECK5-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8 8141 // CHECK5-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 8142 // CHECK5-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 8143 // CHECK5-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8 8144 // CHECK5-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 8145 // CHECK5-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8 8146 // CHECK5-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 8147 // CHECK5-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 8148 // CHECK5-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 8149 // CHECK5-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 8150 // CHECK5-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) 8151 // CHECK5-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 8152 // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt 
i64 [[TMP3]], 6 8153 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 8154 // CHECK5: cond.true: 8155 // CHECK5-NEXT: br label [[COND_END:%.*]] 8156 // CHECK5: cond.false: 8157 // CHECK5-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 8158 // CHECK5-NEXT: br label [[COND_END]] 8159 // CHECK5: cond.end: 8160 // CHECK5-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 8161 // CHECK5-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 8162 // CHECK5-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 8163 // CHECK5-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8 8164 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 8165 // CHECK5: omp.inner.for.cond: 8166 // CHECK5-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !44 8167 // CHECK5-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !44 8168 // CHECK5-NEXT: [[CMP2:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]] 8169 // CHECK5-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 8170 // CHECK5: omp.inner.for.body: 8171 // CHECK5-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !44 8172 // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3 8173 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]] 8174 // CHECK5-NEXT: store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !44 8175 // CHECK5-NEXT: [[TMP9:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !44 8176 // CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1 8177 // CHECK5-NEXT: store i32 [[ADD3]], i32* [[CONV]], align 8, !llvm.access.group !44 8178 // CHECK5-NEXT: [[TMP10:%.*]] = load i16, i16* [[CONV1]], align 8, !llvm.access.group !44 8179 // CHECK5-NEXT: [[CONV4:%.*]] = sext i16 [[TMP10]] to i32 8180 // CHECK5-NEXT: [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1 8181 // CHECK5-NEXT: [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16 8182 // CHECK5-NEXT: store i16 [[CONV6]], i16* [[CONV1]], align 8, !llvm.access.group !44 8183 // CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2 8184 // CHECK5-NEXT: [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !44 8185 // CHECK5-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP11]], 1 8186 // CHECK5-NEXT: store i32 [[ADD7]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !44 8187 // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 8188 // CHECK5: omp.body.continue: 8189 // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 8190 // CHECK5: omp.inner.for.inc: 8191 // CHECK5-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !44 8192 // CHECK5-NEXT: [[ADD8:%.*]] = add nsw i64 [[TMP12]], 1 8193 // CHECK5-NEXT: store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !44 8194 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP45:![0-9]+]] 8195 // CHECK5: omp.inner.for.end: 8196 // CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 8197 // CHECK5: omp.loop.exit: 8198 // CHECK5-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 8199 // CHECK5-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 8200 // CHECK5-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 8201 // CHECK5-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 8202 // CHECK5: .omp.final.then: 8203 // CHECK5-NEXT: store i64 11, i64* [[I]], align 8 8204 // CHECK5-NEXT: br label [[DOTOMP_FINAL_DONE]] 8205 // 
CHECK5: .omp.final.done: 8206 // CHECK5-NEXT: ret void 8207 // 8208 // 8209 // CHECK5-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg 8210 // CHECK5-SAME: () #[[ATTR7:[0-9]+]] { 8211 // CHECK5-NEXT: entry: 8212 // CHECK5-NEXT: call void @__tgt_register_requires(i64 1) 8213 // CHECK5-NEXT: ret void 8214 // 8215 // 8216 // CHECK6-LABEL: define {{[^@]+}}@_Z7get_valv 8217 // CHECK6-SAME: () #[[ATTR0:[0-9]+]] { 8218 // CHECK6-NEXT: entry: 8219 // CHECK6-NEXT: ret i64 0 8220 // 8221 // 8222 // CHECK6-LABEL: define {{[^@]+}}@_Z3fooi 8223 // CHECK6-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] { 8224 // CHECK6-NEXT: entry: 8225 // CHECK6-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 8226 // CHECK6-NEXT: [[A:%.*]] = alloca i32, align 4 8227 // CHECK6-NEXT: [[AA:%.*]] = alloca i16, align 2 8228 // CHECK6-NEXT: [[B:%.*]] = alloca [10 x float], align 4 8229 // CHECK6-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8 8230 // CHECK6-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8 8231 // CHECK6-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8 8232 // CHECK6-NEXT: [[__VLA_EXPR1:%.*]] = alloca i64, align 8 8233 // CHECK6-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8 8234 // CHECK6-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1 8235 // CHECK6-NEXT: [[K:%.*]] = alloca i64, align 8 8236 // CHECK6-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 8237 // CHECK6-NEXT: [[K_CASTED:%.*]] = alloca i64, align 8 8238 // CHECK6-NEXT: [[LIN:%.*]] = alloca i32, align 4 8239 // CHECK6-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 8240 // CHECK6-NEXT: [[LIN_CASTED:%.*]] = alloca i64, align 8 8241 // CHECK6-NEXT: [[A_CASTED4:%.*]] = alloca i64, align 8 8242 // CHECK6-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 8243 // CHECK6-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 8244 // CHECK6-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 8245 // CHECK6-NEXT: [[A_CASTED6:%.*]] = alloca i64, align 8 8246 // CHECK6-NEXT: [[AA_CASTED8:%.*]] = alloca i64, align 8 8247 // CHECK6-NEXT: [[DOTOFFLOAD_BASEPTRS10:%.*]] = alloca [2 x i8*], align 8 8248 // CHECK6-NEXT: [[DOTOFFLOAD_PTRS11:%.*]] = alloca [2 x i8*], align 8 8249 // CHECK6-NEXT: [[DOTOFFLOAD_MAPPERS12:%.*]] = alloca [2 x i8*], align 8 8250 // CHECK6-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 8251 // CHECK6-NEXT: [[A_CASTED15:%.*]] = alloca i64, align 8 8252 // CHECK6-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 8253 // CHECK6-NEXT: [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [10 x i8*], align 8 8254 // CHECK6-NEXT: [[DOTOFFLOAD_PTRS21:%.*]] = alloca [10 x i8*], align 8 8255 // CHECK6-NEXT: [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [10 x i8*], align 8 8256 // CHECK6-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 8 8257 // CHECK6-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) 8258 // CHECK6-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 8259 // CHECK6-NEXT: store i32 0, i32* [[A]], align 4 8260 // CHECK6-NEXT: store i16 0, i16* [[AA]], align 2 8261 // CHECK6-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 8262 // CHECK6-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 8263 // CHECK6-NEXT: [[TMP3:%.*]] = call i8* @llvm.stacksave() 8264 // CHECK6-NEXT: store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8 8265 // CHECK6-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP2]], align 4 8266 // CHECK6-NEXT: store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8 8267 // CHECK6-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 8268 // CHECK6-NEXT: [[TMP5:%.*]] 
= zext i32 [[TMP4]] to i64 8269 // CHECK6-NEXT: [[TMP6:%.*]] = mul nuw i64 5, [[TMP5]] 8270 // CHECK6-NEXT: [[VLA1:%.*]] = alloca double, i64 [[TMP6]], align 8 8271 // CHECK6-NEXT: store i64 [[TMP5]], i64* [[__VLA_EXPR1]], align 8 8272 // CHECK6-NEXT: [[TMP7:%.*]] = call i8* @__kmpc_omp_target_task_alloc(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i64 40, i64 1, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*), i64 -1) 8273 // CHECK6-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.kmp_task_t_with_privates* 8274 // CHECK6-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP8]], i32 0, i32 0 8275 // CHECK6-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i8* [[TMP7]]) 8276 // CHECK6-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv() 8277 // CHECK6-NEXT: store i64 [[CALL]], i64* [[K]], align 8 8278 // CHECK6-NEXT: [[TMP11:%.*]] = load i32, i32* [[A]], align 4 8279 // CHECK6-NEXT: [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32* 8280 // CHECK6-NEXT: store i32 [[TMP11]], i32* [[CONV]], align 4 8281 // CHECK6-NEXT: [[TMP12:%.*]] = load i64, i64* [[A_CASTED]], align 8 8282 // CHECK6-NEXT: [[TMP13:%.*]] = load i64, i64* [[K]], align 8 8283 // CHECK6-NEXT: store i64 [[TMP13]], i64* [[K_CASTED]], align 8 8284 // CHECK6-NEXT: [[TMP14:%.*]] = load i64, i64* [[K_CASTED]], align 8 8285 // CHECK6-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101(i64 [[TMP12]], i64 [[TMP14]]) #[[ATTR4:[0-9]+]] 8286 // CHECK6-NEXT: store i32 12, i32* [[LIN]], align 4 8287 // CHECK6-NEXT: [[TMP15:%.*]] = load i16, i16* [[AA]], align 2 8288 // CHECK6-NEXT: [[CONV2:%.*]] = bitcast i64* [[AA_CASTED]] to i16* 8289 // CHECK6-NEXT: store i16 [[TMP15]], i16* [[CONV2]], align 2 8290 // CHECK6-NEXT: [[TMP16:%.*]] = load i64, i64* [[AA_CASTED]], align 8 8291 // CHECK6-NEXT: [[TMP17:%.*]] = load i32, i32* [[LIN]], align 4 8292 // CHECK6-NEXT: [[CONV3:%.*]] = bitcast i64* [[LIN_CASTED]] to i32* 8293 // CHECK6-NEXT: store i32 [[TMP17]], i32* [[CONV3]], align 4 8294 // CHECK6-NEXT: [[TMP18:%.*]] = load i64, i64* [[LIN_CASTED]], align 8 8295 // CHECK6-NEXT: [[TMP19:%.*]] = load i32, i32* [[A]], align 4 8296 // CHECK6-NEXT: [[CONV5:%.*]] = bitcast i64* [[A_CASTED4]] to i32* 8297 // CHECK6-NEXT: store i32 [[TMP19]], i32* [[CONV5]], align 4 8298 // CHECK6-NEXT: [[TMP20:%.*]] = load i64, i64* [[A_CASTED4]], align 8 8299 // CHECK6-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 8300 // CHECK6-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i64* 8301 // CHECK6-NEXT: store i64 [[TMP16]], i64* [[TMP22]], align 8 8302 // CHECK6-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 8303 // CHECK6-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i64* 8304 // CHECK6-NEXT: store i64 [[TMP16]], i64* [[TMP24]], align 8 8305 // CHECK6-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 8306 // CHECK6-NEXT: store i8* null, i8** [[TMP25]], align 8 8307 // CHECK6-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 8308 // CHECK6-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64* 8309 // CHECK6-NEXT: store i64 [[TMP18]], i64* [[TMP27]], align 8 8310 // CHECK6-NEXT: [[TMP28:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 
1 8311 // CHECK6-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64* 8312 // CHECK6-NEXT: store i64 [[TMP18]], i64* [[TMP29]], align 8 8313 // CHECK6-NEXT: [[TMP30:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 8314 // CHECK6-NEXT: store i8* null, i8** [[TMP30]], align 8 8315 // CHECK6-NEXT: [[TMP31:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 8316 // CHECK6-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i64* 8317 // CHECK6-NEXT: store i64 [[TMP20]], i64* [[TMP32]], align 8 8318 // CHECK6-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 8319 // CHECK6-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i64* 8320 // CHECK6-NEXT: store i64 [[TMP20]], i64* [[TMP34]], align 8 8321 // CHECK6-NEXT: [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 8322 // CHECK6-NEXT: store i8* null, i8** [[TMP35]], align 8 8323 // CHECK6-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 8324 // CHECK6-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 8325 // CHECK6-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108.region_id, i32 3, i8** [[TMP36]], i8** [[TMP37]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) 8326 // CHECK6-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0 8327 // CHECK6-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 8328 // CHECK6: omp_offload.failed: 8329 // CHECK6-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108(i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]]) #[[ATTR4]] 8330 // CHECK6-NEXT: br label [[OMP_OFFLOAD_CONT]] 8331 // CHECK6: omp_offload.cont: 8332 // CHECK6-NEXT: [[TMP40:%.*]] = load i32, i32* [[A]], align 4 8333 // CHECK6-NEXT: [[CONV7:%.*]] = bitcast i64* [[A_CASTED6]] to i32* 8334 // CHECK6-NEXT: store i32 [[TMP40]], i32* [[CONV7]], align 4 8335 // CHECK6-NEXT: [[TMP41:%.*]] = load i64, i64* [[A_CASTED6]], align 8 8336 // CHECK6-NEXT: [[TMP42:%.*]] = load i16, i16* [[AA]], align 2 8337 // CHECK6-NEXT: [[CONV9:%.*]] = bitcast i64* [[AA_CASTED8]] to i16* 8338 // CHECK6-NEXT: store i16 [[TMP42]], i16* [[CONV9]], align 2 8339 // CHECK6-NEXT: [[TMP43:%.*]] = load i64, i64* [[AA_CASTED8]], align 8 8340 // CHECK6-NEXT: [[TMP44:%.*]] = load i32, i32* [[N_ADDR]], align 4 8341 // CHECK6-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP44]], 10 8342 // CHECK6-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 8343 // CHECK6: omp_if.then: 8344 // CHECK6-NEXT: [[TMP45:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0 8345 // CHECK6-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i64* 8346 // CHECK6-NEXT: store i64 [[TMP41]], i64* [[TMP46]], align 8 8347 // CHECK6-NEXT: [[TMP47:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0 8348 // CHECK6-NEXT: [[TMP48:%.*]] = bitcast i8** [[TMP47]] to i64* 8349 // CHECK6-NEXT: store i64 [[TMP41]], i64* [[TMP48]], align 8 8350 // CHECK6-NEXT: [[TMP49:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS12]], i64 0, i64 0 8351 // CHECK6-NEXT: 
store i8* null, i8** [[TMP49]], align 8 8352 // CHECK6-NEXT: [[TMP50:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 1 8353 // CHECK6-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i64* 8354 // CHECK6-NEXT: store i64 [[TMP43]], i64* [[TMP51]], align 8 8355 // CHECK6-NEXT: [[TMP52:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 1 8356 // CHECK6-NEXT: [[TMP53:%.*]] = bitcast i8** [[TMP52]] to i64* 8357 // CHECK6-NEXT: store i64 [[TMP43]], i64* [[TMP53]], align 8 8358 // CHECK6-NEXT: [[TMP54:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS12]], i64 0, i64 1 8359 // CHECK6-NEXT: store i8* null, i8** [[TMP54]], align 8 8360 // CHECK6-NEXT: [[TMP55:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0 8361 // CHECK6-NEXT: [[TMP56:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0 8362 // CHECK6-NEXT: [[TMP57:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116.region_id, i32 2, i8** [[TMP55]], i8** [[TMP56]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) 8363 // CHECK6-NEXT: [[TMP58:%.*]] = icmp ne i32 [[TMP57]], 0 8364 // CHECK6-NEXT: br i1 [[TMP58]], label [[OMP_OFFLOAD_FAILED13:%.*]], label [[OMP_OFFLOAD_CONT14:%.*]] 8365 // CHECK6: omp_offload.failed13: 8366 // CHECK6-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116(i64 [[TMP41]], i64 [[TMP43]]) #[[ATTR4]] 8367 // CHECK6-NEXT: br label [[OMP_OFFLOAD_CONT14]] 8368 // CHECK6: omp_offload.cont14: 8369 // CHECK6-NEXT: br label [[OMP_IF_END:%.*]] 8370 // CHECK6: omp_if.else: 8371 // CHECK6-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116(i64 [[TMP41]], i64 [[TMP43]]) #[[ATTR4]] 8372 // CHECK6-NEXT: br label [[OMP_IF_END]] 8373 // CHECK6: omp_if.end: 8374 // CHECK6-NEXT: [[TMP59:%.*]] = load i32, i32* [[A]], align 4 8375 // CHECK6-NEXT: store i32 [[TMP59]], i32* [[DOTCAPTURE_EXPR_]], align 4 8376 // CHECK6-NEXT: [[TMP60:%.*]] = load i32, i32* [[A]], align 4 8377 // CHECK6-NEXT: [[CONV16:%.*]] = bitcast i64* [[A_CASTED15]] to i32* 8378 // CHECK6-NEXT: store i32 [[TMP60]], i32* [[CONV16]], align 4 8379 // CHECK6-NEXT: [[TMP61:%.*]] = load i64, i64* [[A_CASTED15]], align 8 8380 // CHECK6-NEXT: [[TMP62:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 8381 // CHECK6-NEXT: [[CONV17:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* 8382 // CHECK6-NEXT: store i32 [[TMP62]], i32* [[CONV17]], align 4 8383 // CHECK6-NEXT: [[TMP63:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 8384 // CHECK6-NEXT: [[TMP64:%.*]] = load i32, i32* [[N_ADDR]], align 4 8385 // CHECK6-NEXT: [[CMP18:%.*]] = icmp sgt i32 [[TMP64]], 20 8386 // CHECK6-NEXT: br i1 [[CMP18]], label [[OMP_IF_THEN19:%.*]], label [[OMP_IF_ELSE25:%.*]] 8387 // CHECK6: omp_if.then19: 8388 // CHECK6-NEXT: [[TMP65:%.*]] = mul nuw i64 [[TMP2]], 4 8389 // CHECK6-NEXT: [[TMP66:%.*]] = mul nuw i64 5, [[TMP5]] 8390 // CHECK6-NEXT: [[TMP67:%.*]] = mul nuw i64 [[TMP66]], 8 8391 // CHECK6-NEXT: [[TMP68:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 8392 // CHECK6-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i64* 8393 // CHECK6-NEXT: store i64 [[TMP61]], i64* [[TMP69]], align 
8 8394 // CHECK6-NEXT: [[TMP70:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 8395 // CHECK6-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i64* 8396 // CHECK6-NEXT: store i64 [[TMP61]], i64* [[TMP71]], align 8 8397 // CHECK6-NEXT: [[TMP72:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 8398 // CHECK6-NEXT: store i64 4, i64* [[TMP72]], align 8 8399 // CHECK6-NEXT: [[TMP73:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0 8400 // CHECK6-NEXT: store i8* null, i8** [[TMP73]], align 8 8401 // CHECK6-NEXT: [[TMP74:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1 8402 // CHECK6-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to [10 x float]** 8403 // CHECK6-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP75]], align 8 8404 // CHECK6-NEXT: [[TMP76:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1 8405 // CHECK6-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to [10 x float]** 8406 // CHECK6-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP77]], align 8 8407 // CHECK6-NEXT: [[TMP78:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 8408 // CHECK6-NEXT: store i64 40, i64* [[TMP78]], align 8 8409 // CHECK6-NEXT: [[TMP79:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1 8410 // CHECK6-NEXT: store i8* null, i8** [[TMP79]], align 8 8411 // CHECK6-NEXT: [[TMP80:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2 8412 // CHECK6-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i64* 8413 // CHECK6-NEXT: store i64 [[TMP2]], i64* [[TMP81]], align 8 8414 // CHECK6-NEXT: [[TMP82:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2 8415 // CHECK6-NEXT: [[TMP83:%.*]] = bitcast i8** [[TMP82]] to i64* 8416 // CHECK6-NEXT: store i64 [[TMP2]], i64* [[TMP83]], align 8 8417 // CHECK6-NEXT: [[TMP84:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 8418 // CHECK6-NEXT: store i64 8, i64* [[TMP84]], align 8 8419 // CHECK6-NEXT: [[TMP85:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2 8420 // CHECK6-NEXT: store i8* null, i8** [[TMP85]], align 8 8421 // CHECK6-NEXT: [[TMP86:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3 8422 // CHECK6-NEXT: [[TMP87:%.*]] = bitcast i8** [[TMP86]] to float** 8423 // CHECK6-NEXT: store float* [[VLA]], float** [[TMP87]], align 8 8424 // CHECK6-NEXT: [[TMP88:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3 8425 // CHECK6-NEXT: [[TMP89:%.*]] = bitcast i8** [[TMP88]] to float** 8426 // CHECK6-NEXT: store float* [[VLA]], float** [[TMP89]], align 8 8427 // CHECK6-NEXT: [[TMP90:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 8428 // CHECK6-NEXT: store i64 [[TMP65]], i64* [[TMP90]], align 8 8429 // CHECK6-NEXT: [[TMP91:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 3 8430 // CHECK6-NEXT: store i8* null, i8** [[TMP91]], align 8 8431 // CHECK6-NEXT: [[TMP92:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 4 8432 // CHECK6-NEXT: [[TMP93:%.*]] = bitcast i8** [[TMP92]] to [5 x [10 x double]]** 8433 // CHECK6-NEXT: store [5 x [10 
x double]]* [[C]], [5 x [10 x double]]** [[TMP93]], align 8 8434 // CHECK6-NEXT: [[TMP94:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 4 8435 // CHECK6-NEXT: [[TMP95:%.*]] = bitcast i8** [[TMP94]] to [5 x [10 x double]]** 8436 // CHECK6-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP95]], align 8 8437 // CHECK6-NEXT: [[TMP96:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 8438 // CHECK6-NEXT: store i64 400, i64* [[TMP96]], align 8 8439 // CHECK6-NEXT: [[TMP97:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 4 8440 // CHECK6-NEXT: store i8* null, i8** [[TMP97]], align 8 8441 // CHECK6-NEXT: [[TMP98:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 5 8442 // CHECK6-NEXT: [[TMP99:%.*]] = bitcast i8** [[TMP98]] to i64* 8443 // CHECK6-NEXT: store i64 5, i64* [[TMP99]], align 8 8444 // CHECK6-NEXT: [[TMP100:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 5 8445 // CHECK6-NEXT: [[TMP101:%.*]] = bitcast i8** [[TMP100]] to i64* 8446 // CHECK6-NEXT: store i64 5, i64* [[TMP101]], align 8 8447 // CHECK6-NEXT: [[TMP102:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 8448 // CHECK6-NEXT: store i64 8, i64* [[TMP102]], align 8 8449 // CHECK6-NEXT: [[TMP103:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 5 8450 // CHECK6-NEXT: store i8* null, i8** [[TMP103]], align 8 8451 // CHECK6-NEXT: [[TMP104:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 6 8452 // CHECK6-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to i64* 8453 // CHECK6-NEXT: store i64 [[TMP5]], i64* [[TMP105]], align 8 8454 // CHECK6-NEXT: [[TMP106:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 6 8455 // CHECK6-NEXT: [[TMP107:%.*]] = bitcast i8** [[TMP106]] to i64* 8456 // CHECK6-NEXT: store i64 [[TMP5]], i64* [[TMP107]], align 8 8457 // CHECK6-NEXT: [[TMP108:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 8458 // CHECK6-NEXT: store i64 8, i64* [[TMP108]], align 8 8459 // CHECK6-NEXT: [[TMP109:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 6 8460 // CHECK6-NEXT: store i8* null, i8** [[TMP109]], align 8 8461 // CHECK6-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 7 8462 // CHECK6-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to double** 8463 // CHECK6-NEXT: store double* [[VLA1]], double** [[TMP111]], align 8 8464 // CHECK6-NEXT: [[TMP112:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 7 8465 // CHECK6-NEXT: [[TMP113:%.*]] = bitcast i8** [[TMP112]] to double** 8466 // CHECK6-NEXT: store double* [[VLA1]], double** [[TMP113]], align 8 8467 // CHECK6-NEXT: [[TMP114:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 8468 // CHECK6-NEXT: store i64 [[TMP67]], i64* [[TMP114]], align 8 8469 // CHECK6-NEXT: [[TMP115:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 7 8470 // CHECK6-NEXT: store i8* null, i8** [[TMP115]], align 8 8471 // CHECK6-NEXT: [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 8 8472 // CHECK6-NEXT: [[TMP117:%.*]] = 
bitcast i8** [[TMP116]] to %struct.TT** 8473 // CHECK6-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP117]], align 8 8474 // CHECK6-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 8 8475 // CHECK6-NEXT: [[TMP119:%.*]] = bitcast i8** [[TMP118]] to %struct.TT** 8476 // CHECK6-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP119]], align 8 8477 // CHECK6-NEXT: [[TMP120:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 8478 // CHECK6-NEXT: store i64 16, i64* [[TMP120]], align 8 8479 // CHECK6-NEXT: [[TMP121:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 8 8480 // CHECK6-NEXT: store i8* null, i8** [[TMP121]], align 8 8481 // CHECK6-NEXT: [[TMP122:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 9 8482 // CHECK6-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to i64* 8483 // CHECK6-NEXT: store i64 [[TMP63]], i64* [[TMP123]], align 8 8484 // CHECK6-NEXT: [[TMP124:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 9 8485 // CHECK6-NEXT: [[TMP125:%.*]] = bitcast i8** [[TMP124]] to i64* 8486 // CHECK6-NEXT: store i64 [[TMP63]], i64* [[TMP125]], align 8 8487 // CHECK6-NEXT: [[TMP126:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 9 8488 // CHECK6-NEXT: store i64 4, i64* [[TMP126]], align 8 8489 // CHECK6-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 9 8490 // CHECK6-NEXT: store i8* null, i8** [[TMP127]], align 8 8491 // CHECK6-NEXT: [[TMP128:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 8492 // CHECK6-NEXT: [[TMP129:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 8493 // CHECK6-NEXT: [[TMP130:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 8494 // CHECK6-NEXT: [[TMP131:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140.region_id, i32 10, i8** [[TMP128]], i8** [[TMP129]], i64* [[TMP130]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) 8495 // CHECK6-NEXT: [[TMP132:%.*]] = icmp ne i32 [[TMP131]], 0 8496 // CHECK6-NEXT: br i1 [[TMP132]], label [[OMP_OFFLOAD_FAILED23:%.*]], label [[OMP_OFFLOAD_CONT24:%.*]] 8497 // CHECK6: omp_offload.failed23: 8498 // CHECK6-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i64 [[TMP61]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP63]]) #[[ATTR4]] 8499 // CHECK6-NEXT: br label [[OMP_OFFLOAD_CONT24]] 8500 // CHECK6: omp_offload.cont24: 8501 // CHECK6-NEXT: br label [[OMP_IF_END26:%.*]] 8502 // CHECK6: omp_if.else25: 8503 // CHECK6-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i64 [[TMP61]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP63]]) #[[ATTR4]] 8504 // CHECK6-NEXT: br label [[OMP_IF_END26]] 8505 // CHECK6: omp_if.end26: 8506 // CHECK6-NEXT: [[TMP133:%.*]] = load i32, i32* [[A]], align 4 8507 // CHECK6-NEXT: [[TMP134:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 8508 // CHECK6-NEXT: call void 
@llvm.stackrestore(i8* [[TMP134]]) 8509 // CHECK6-NEXT: ret i32 [[TMP133]] 8510 // 8511 // 8512 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96 8513 // CHECK6-SAME: () #[[ATTR2:[0-9]+]] { 8514 // CHECK6-NEXT: entry: 8515 // CHECK6-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*)) 8516 // CHECK6-NEXT: ret void 8517 // 8518 // 8519 // CHECK6-LABEL: define {{[^@]+}}@.omp_outlined. 8520 // CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR3:[0-9]+]] { 8521 // CHECK6-NEXT: entry: 8522 // CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 8523 // CHECK6-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 8524 // CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 8525 // CHECK6-NEXT: [[TMP:%.*]] = alloca i32, align 4 8526 // CHECK6-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 8527 // CHECK6-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 8528 // CHECK6-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 8529 // CHECK6-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 8530 // CHECK6-NEXT: [[I:%.*]] = alloca i32, align 4 8531 // CHECK6-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 8532 // CHECK6-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 8533 // CHECK6-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 8534 // CHECK6-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4 8535 // CHECK6-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 8536 // CHECK6-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 8537 // CHECK6-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 8538 // CHECK6-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 8539 // CHECK6-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 8540 // CHECK6-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 8541 // CHECK6-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5 8542 // CHECK6-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 8543 // CHECK6: cond.true: 8544 // CHECK6-NEXT: br label [[COND_END:%.*]] 8545 // CHECK6: cond.false: 8546 // CHECK6-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 8547 // CHECK6-NEXT: br label [[COND_END]] 8548 // CHECK6: cond.end: 8549 // CHECK6-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 8550 // CHECK6-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 8551 // CHECK6-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 8552 // CHECK6-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 8553 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 8554 // CHECK6: omp.inner.for.cond: 8555 // CHECK6-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10 8556 // CHECK6-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !10 8557 // CHECK6-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 8558 // CHECK6-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 8559 // CHECK6: omp.inner.for.body: 8560 // CHECK6-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10 8561 // CHECK6-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5 8562 // CHECK6-NEXT: 
[[ADD:%.*]] = add nsw i32 3, [[MUL]] 8563 // CHECK6-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !10 8564 // CHECK6-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 8565 // CHECK6: omp.body.continue: 8566 // CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 8567 // CHECK6: omp.inner.for.inc: 8568 // CHECK6-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10 8569 // CHECK6-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 8570 // CHECK6-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10 8571 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] 8572 // CHECK6: omp.inner.for.end: 8573 // CHECK6-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 8574 // CHECK6: omp.loop.exit: 8575 // CHECK6-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 8576 // CHECK6-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 8577 // CHECK6-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0 8578 // CHECK6-NEXT: br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 8579 // CHECK6: .omp.final.then: 8580 // CHECK6-NEXT: store i32 33, i32* [[I]], align 4 8581 // CHECK6-NEXT: br label [[DOTOMP_FINAL_DONE]] 8582 // CHECK6: .omp.final.done: 8583 // CHECK6-NEXT: ret void 8584 // 8585 // 8586 // CHECK6-LABEL: define {{[^@]+}}@.omp_task_entry. 8587 // CHECK6-SAME: (i32 signext [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias [[TMP1:%.*]]) #[[ATTR5:[0-9]+]] { 8588 // CHECK6-NEXT: entry: 8589 // CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4 8590 // CHECK6-NEXT: [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 8 8591 // CHECK6-NEXT: [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 8 8592 // CHECK6-NEXT: [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 8 8593 // CHECK6-NEXT: [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 8 8594 // CHECK6-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 8 8595 // CHECK6-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4 8596 // CHECK6-NEXT: [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 8 8597 // CHECK6-NEXT: store i32 [[TMP0]], i32* [[DOTADDR]], align 4 8598 // CHECK6-NEXT: store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8 8599 // CHECK6-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4 8600 // CHECK6-NEXT: [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8 8601 // CHECK6-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0 8602 // CHECK6-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2 8603 // CHECK6-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0 8604 // CHECK6-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8 8605 // CHECK6-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon* 8606 // CHECK6-NEXT: [[TMP9:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8* 8607 // CHECK6-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META16:![0-9]+]]) 8608 // CHECK6-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META19:![0-9]+]]) 8609 // CHECK6-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]]) 8610 // CHECK6-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata 
[[META23:![0-9]+]])
// CHECK6-NEXT: store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !25
// CHECK6-NEXT: store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 8, !noalias !25
// CHECK6-NEXT: store i8* null, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !25
// CHECK6-NEXT: store void (i8*, ...)* null, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !25
// CHECK6-NEXT: store i8* [[TMP9]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !25
// CHECK6-NEXT: store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !25
// CHECK6-NEXT: [[TMP10:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !25
// CHECK6-NEXT: [[TMP11:%.*]] = call i32 @__tgt_target_teams_nowait_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 1, i32 0, i32 0, i8* null, i32 0, i8* null) #[[ATTR4]]
// CHECK6-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
// CHECK6-NEXT: br i1 [[TMP12]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]]
// CHECK6: omp_offload.failed.i:
// CHECK6-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96() #[[ATTR4]]
// CHECK6-NEXT: br label [[DOTOMP_OUTLINED__1_EXIT]]
// CHECK6: .omp_outlined..1.exit:
// CHECK6-NEXT: ret i32 0
//
//
// CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101
// CHECK6-SAME: (i64 [[A:%.*]], i64 [[K:%.*]]) #[[ATTR3]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[K_ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[K_CASTED:%.*]] = alloca i64, align 8
// CHECK6-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK6-NEXT: store i64 [[K]], i64* [[K_ADDR]], align 8
// CHECK6-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK6-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK6-NEXT: [[CONV1:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK6-NEXT: store i32 [[TMP0]], i32* [[CONV1]], align 4
// CHECK6-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK6-NEXT: [[TMP2:%.*]] = load i64, i64* [[K_ADDR]], align 8
// CHECK6-NEXT: store i64 [[TMP2]], i64* [[K_CASTED]], align 8
// CHECK6-NEXT: [[TMP3:%.*]] = load i64, i64* [[K_CASTED]], align 8
// CHECK6-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]])
// CHECK6-NEXT: ret void
//
//
// CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[K:%.*]]) #[[ATTR3]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK6-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK6-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[K_ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTLINEAR_START:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[K1:%.*]] = alloca i64, align 8
// CHECK6-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK6-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK6-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK6-NEXT: store i64 [[K]], i64* [[K_ADDR]], align 8
// CHECK6-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK6-NEXT: [[TMP0:%.*]] = load i64, i64* [[K_ADDR]], align 8
// CHECK6-NEXT: store i64 [[TMP0]], i64* [[DOTLINEAR_START]], align 8
// CHECK6-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK6-NEXT: store i32 8, i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK6-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK6-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK6-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK6-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP2]])
// CHECK6-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32 1073741859, i32 0, i32 8, i32 1, i32 1)
// CHECK6-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK6: omp.dispatch.cond:
// CHECK6-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK6-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP3]], 0
// CHECK6-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK6: omp.dispatch.body:
// CHECK6-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK6-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK6: omp.inner.for.cond:
// CHECK6-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
// CHECK6-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !26
// CHECK6-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK6-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK6: omp.inner.for.body:
// CHECK6-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
// CHECK6-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK6-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[MUL]]
// CHECK6-NEXT: store i32 [[SUB]], i32* [[I]], align 4, !llvm.access.group !26
// CHECK6-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !26
// CHECK6-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
// CHECK6-NEXT: [[MUL2:%.*]] = mul nsw i32 [[TMP9]], 3
// CHECK6-NEXT: [[CONV3:%.*]] = sext i32 [[MUL2]] to i64
// CHECK6-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP8]], [[CONV3]]
// CHECK6-NEXT: store i64 [[ADD]], i64* [[K1]], align 8, !llvm.access.group !26
// CHECK6-NEXT: [[TMP10:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !26
// CHECK6-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK6-NEXT: store i32 [[ADD4]], i32* [[CONV]], align 8, !llvm.access.group !26
// CHECK6-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK6: omp.body.continue:
// CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK6: omp.inner.for.inc:
// CHECK6-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
// CHECK6-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK6-NEXT: store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]]
// CHECK6: omp.inner.for.end:
// CHECK6-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK6: omp.dispatch.inc:
// CHECK6-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK6: omp.dispatch.end:
// CHECK6-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK6-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
// CHECK6-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK6: .omp.final.then:
// CHECK6-NEXT: store i32 1, i32* [[I]], align 4
// CHECK6-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK6: .omp.final.done:
// CHECK6-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK6-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
// CHECK6-NEXT: br i1 [[TMP15]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
// CHECK6: .omp.linear.pu:
// CHECK6-NEXT: [[TMP16:%.*]] = load i64, i64* [[K1]], align 8
// CHECK6-NEXT: store i64 [[TMP16]], i64* [[K_ADDR]], align 8
// CHECK6-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]]
// CHECK6: .omp.linear.pu.done:
// CHECK6-NEXT: ret void
//
//
// CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108
// CHECK6-SAME: (i64 [[AA:%.*]], i64 [[LIN:%.*]], i64 [[A:%.*]]) #[[ATTR2]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[LIN_ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[LIN_CASTED:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK6-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK6-NEXT: store i64 [[LIN]], i64* [[LIN_ADDR]], align 8
// CHECK6-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK6-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK6-NEXT: [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32*
// CHECK6-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK6-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 8
// CHECK6-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK6-NEXT: store i16 [[TMP0]], i16* [[CONV3]], align 2
// CHECK6-NEXT: [[TMP1:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK6-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV1]], align 8
// CHECK6-NEXT: [[CONV4:%.*]] = bitcast i64* [[LIN_CASTED]] to i32*
// CHECK6-NEXT: store i32 [[TMP2]], i32* [[CONV4]], align 4
// CHECK6-NEXT: [[TMP3:%.*]] = load i64, i64* [[LIN_CASTED]], align 8
// CHECK6-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV2]], align 8
// CHECK6-NEXT: [[CONV5:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK6-NEXT: store i32 [[TMP4]], i32* [[CONV5]], align 4
// CHECK6-NEXT: [[TMP5:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK6-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]])
// CHECK6-NEXT: ret void
//
//
// CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[AA:%.*]], i64 [[LIN:%.*]], i64 [[A:%.*]]) #[[ATTR3]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK6-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK6-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[LIN_ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[TMP:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[DOTLINEAR_START:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTLINEAR_START3:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[LIN4:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[A5:%.*]] = alloca i32, align 4
// CHECK6-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK6-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK6-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK6-NEXT: store i64 [[LIN]], i64* [[LIN_ADDR]], align 8
// CHECK6-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK6-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK6-NEXT: [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32*
// CHECK6-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK6-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV1]], align 8
// CHECK6-NEXT: store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4
// CHECK6-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV2]], align 8
// CHECK6-NEXT: store i32 [[TMP1]], i32* [[DOTLINEAR_START3]], align 4
// CHECK6-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv()
// CHECK6-NEXT: store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8
// CHECK6-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK6-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK6-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK6-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK6-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK6-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK6-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3]], i32 [[TMP3]])
// CHECK6-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK6-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK6-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3
// CHECK6-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK6: cond.true:
// CHECK6-NEXT: br label [[COND_END:%.*]]
// CHECK6: cond.false:
// CHECK6-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK6-NEXT: br label [[COND_END]]
// CHECK6: cond.end:
// CHECK6-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK6-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK6-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK6-NEXT: store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK6: omp.inner.for.cond:
// CHECK6-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29
// CHECK6-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !29
// CHECK6-NEXT: [[CMP6:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]]
// CHECK6-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK6: omp.inner.for.body:
// CHECK6-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29
// CHECK6-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400
// CHECK6-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK6-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !29
// CHECK6-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4, !llvm.access.group !29
// CHECK6-NEXT: [[CONV7:%.*]] = sext i32 [[TMP10]] to i64
// CHECK6-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29
// CHECK6-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !29
// CHECK6-NEXT: [[MUL8:%.*]] = mul i64 [[TMP11]], [[TMP12]]
// CHECK6-NEXT: [[ADD:%.*]] = add i64 [[CONV7]], [[MUL8]]
// CHECK6-NEXT: [[CONV9:%.*]] = trunc i64 [[ADD]] to i32
// CHECK6-NEXT: store i32 [[CONV9]], i32* [[LIN4]], align 4, !llvm.access.group !29
// CHECK6-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START3]], align 4, !llvm.access.group !29
// CHECK6-NEXT: [[CONV10:%.*]] = sext i32 [[TMP13]] to i64
// CHECK6-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29
// CHECK6-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !29
// CHECK6-NEXT: [[MUL11:%.*]] = mul i64 [[TMP14]], [[TMP15]]
// CHECK6-NEXT: [[ADD12:%.*]] = add i64 [[CONV10]], [[MUL11]]
// CHECK6-NEXT: [[CONV13:%.*]] = trunc i64 [[ADD12]] to i32
// CHECK6-NEXT: store i32 [[CONV13]], i32* [[A5]], align 4, !llvm.access.group !29
// CHECK6-NEXT: [[TMP16:%.*]] = load i16, i16* [[CONV]], align 8, !llvm.access.group !29
// CHECK6-NEXT: [[CONV14:%.*]] = sext i16 [[TMP16]] to i32
// CHECK6-NEXT: [[ADD15:%.*]] = add nsw i32 [[CONV14]], 1
// CHECK6-NEXT: [[CONV16:%.*]] = trunc i32 [[ADD15]] to i16
// CHECK6-NEXT: store i16 [[CONV16]], i16* [[CONV]], align 8, !llvm.access.group !29
// CHECK6-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK6: omp.body.continue:
// CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK6: omp.inner.for.inc:
// CHECK6-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29
// CHECK6-NEXT: [[ADD17:%.*]] = add i64 [[TMP17]], 1
// CHECK6-NEXT: store i64 [[ADD17]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]]
// CHECK6: omp.inner.for.end:
// CHECK6-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK6: omp.loop.exit:
// CHECK6-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
// CHECK6-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK6-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
// CHECK6-NEXT: br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK6: .omp.final.then:
// CHECK6-NEXT: store i64 400, i64* [[IT]], align 8
// CHECK6-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK6: .omp.final.done:
// CHECK6-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK6-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0
// CHECK6-NEXT: br i1 [[TMP21]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
// CHECK6: .omp.linear.pu:
// CHECK6-NEXT: [[TMP22:%.*]] = load i32, i32* [[LIN4]], align 4
// CHECK6-NEXT: store i32 [[TMP22]], i32* [[CONV1]], align 8
// CHECK6-NEXT: [[TMP23:%.*]] = load i32, i32* [[A5]], align 4
// CHECK6-NEXT: store i32 [[TMP23]], i32* [[CONV2]], align 8
// CHECK6-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]]
// CHECK6: .omp.linear.pu.done:
// CHECK6-NEXT: ret void
//
//
// CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116
// CHECK6-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]]) #[[ATTR2]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK6-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK6-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK6-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK6-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK6-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK6-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK6-NEXT: store i32 [[TMP0]], i32* [[CONV2]], align 4
// CHECK6-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK6-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK6-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK6-NEXT: store i16 [[TMP2]], i16* [[CONV3]], align 2
// CHECK6-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK6-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]])
// CHECK6-NEXT: ret void
//
//
// CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]]) #[[ATTR3]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK6-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK6-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[TMP:%.*]] = alloca i16, align 2
// CHECK6-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[IT:%.*]] = alloca i16, align 2
// CHECK6-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK6-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK6-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK6-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK6-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK6-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK6-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK6-NEXT: store i32 3, i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK6-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK6-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK6-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK6-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK6-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3
// CHECK6-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK6: cond.true:
// CHECK6-NEXT: br label [[COND_END:%.*]]
// CHECK6: cond.false:
// CHECK6-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT: br label [[COND_END]]
// CHECK6: cond.end:
// CHECK6-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK6-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK6-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK6: omp.inner.for.cond:
// CHECK6-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
// CHECK6-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !32
// CHECK6-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK6-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK6: omp.inner.for.body:
// CHECK6-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
// CHECK6-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4
// CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]]
// CHECK6-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD]] to i16
// CHECK6-NEXT: store i16 [[CONV3]], i16* [[IT]], align 2, !llvm.access.group !32
// CHECK6-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !32
// CHECK6-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK6-NEXT: store i32 [[ADD4]], i32* [[CONV]], align 8, !llvm.access.group !32
// CHECK6-NEXT: [[TMP9:%.*]] = load i16, i16* [[CONV1]], align 8, !llvm.access.group !32
// CHECK6-NEXT: [[CONV5:%.*]] = sext i16 [[TMP9]] to i32
// CHECK6-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1
// CHECK6-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16
// CHECK6-NEXT: store i16 [[CONV7]], i16* [[CONV1]], align 8, !llvm.access.group !32
// CHECK6-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK6: omp.body.continue:
// CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK6: omp.inner.for.inc:
// CHECK6-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
// CHECK6-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK6-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]]
// CHECK6: omp.inner.for.end:
// CHECK6-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK6: omp.loop.exit:
// CHECK6-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK6-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK6-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
// CHECK6-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK6: .omp.final.then:
// CHECK6-NEXT: store i16 22, i16* [[IT]], align 2
// CHECK6-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK6: .omp.final.done:
// CHECK6-NEXT: ret void
//
//
// CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140
// CHECK6-SAME: (i64 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
// CHECK6-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8
// CHECK6-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
// CHECK6-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8
// CHECK6-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
// CHECK6-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK6-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK6-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
// CHECK6-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK6-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8
// CHECK6-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK6-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK6-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
// CHECK6-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8
// CHECK6-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
// CHECK6-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK6-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK6-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
// CHECK6-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK6-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
// CHECK6-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK6-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK6-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
// CHECK6-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
// CHECK6-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
// CHECK6-NEXT: [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK6-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK6-NEXT: [[CONV6:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK6-NEXT: store i32 [[TMP8]], i32* [[CONV6]], align 4
// CHECK6-NEXT: [[TMP9:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK6-NEXT: [[TMP10:%.*]] = load i32, i32* [[CONV5]], align 8
// CHECK6-NEXT: [[CONV7:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK6-NEXT: store i32 [[TMP10]], i32* [[CONV7]], align 4
// CHECK6-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK6-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, [10 x float]*, i64, float*, [5 x [10 x double]]*, i64, i64, double*, %struct.TT*, i64)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP9]], [10 x float]* [[TMP0]], i64 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i64 [[TMP11]])
// CHECK6-NEXT: ret void
//
//
// CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..7
// CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK6-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK6-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
// CHECK6-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8
// CHECK6-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
// CHECK6-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8
// CHECK6-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
// CHECK6-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[TMP:%.*]] = alloca i8, align 1
// CHECK6-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[IT:%.*]] = alloca i8, align 1
// CHECK6-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK6-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK6-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK6-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
// CHECK6-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK6-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8
// CHECK6-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK6-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK6-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
// CHECK6-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8
// CHECK6-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
// CHECK6-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK6-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK6-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
// CHECK6-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK6-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
// CHECK6-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK6-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK6-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
// CHECK6-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
// CHECK6-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
// CHECK6-NEXT: [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK6-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK6-NEXT: store i32 25, i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK6-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK6-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV5]], align 8
// CHECK6-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK6-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
// CHECK6-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]])
// CHECK6-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK6: omp.dispatch.cond:
// CHECK6-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25
// CHECK6-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK6: cond.true:
// CHECK6-NEXT: br label [[COND_END:%.*]]
// CHECK6: cond.false:
// CHECK6-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT: br label [[COND_END]]
// CHECK6: cond.end:
// CHECK6-NEXT: [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK6-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK6-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// CHECK6-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK6-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
// CHECK6-NEXT: br i1 [[CMP6]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK6: omp.dispatch.body:
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK6: omp.inner.for.cond:
// CHECK6-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
// CHECK6-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !35
// CHECK6-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
// CHECK6-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK6: omp.inner.for.body:
// CHECK6-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
// CHECK6-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
// CHECK6-NEXT: [[SUB:%.*]] = sub nsw i32 122, [[MUL]]
// CHECK6-NEXT: [[CONV8:%.*]] = trunc i32 [[SUB]] to i8
// CHECK6-NEXT: store i8 [[CONV8]], i8* [[IT]], align 1, !llvm.access.group !35
// CHECK6-NEXT: [[TMP19:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !35
// CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK6-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8, !llvm.access.group !35
// CHECK6-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 2
// CHECK6-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !35
// CHECK6-NEXT: [[CONV9:%.*]] = fpext float [[TMP20]] to double
// CHECK6-NEXT: [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00
// CHECK6-NEXT: [[CONV11:%.*]] = fptrunc double [[ADD10]] to float
// CHECK6-NEXT: store float [[CONV11]], float* [[ARRAYIDX]], align 4, !llvm.access.group !35
// CHECK6-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds float, float* [[TMP2]], i64 3
// CHECK6-NEXT: [[TMP21:%.*]] = load float, float* [[ARRAYIDX12]], align 4, !llvm.access.group !35
// CHECK6-NEXT: [[CONV13:%.*]] = fpext float [[TMP21]] to double
// CHECK6-NEXT: [[ADD14:%.*]] = fadd double [[CONV13]], 1.000000e+00
// CHECK6-NEXT: [[CONV15:%.*]] = fptrunc double [[ADD14]] to float
// CHECK6-NEXT: store float [[CONV15]], float* [[ARRAYIDX12]], align 4, !llvm.access.group !35
// CHECK6-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i64 0, i64 1
// CHECK6-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX16]], i64 0, i64 2
// CHECK6-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX17]], align 8, !llvm.access.group !35
// CHECK6-NEXT: [[ADD18:%.*]] = fadd double [[TMP22]], 1.000000e+00
// CHECK6-NEXT: store double [[ADD18]], double* [[ARRAYIDX17]], align 8, !llvm.access.group !35
// CHECK6-NEXT: [[TMP23:%.*]] = mul nsw i64 1, [[TMP5]]
// CHECK6-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds double, double* [[TMP6]], i64 [[TMP23]]
// CHECK6-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX19]], i64 3
// CHECK6-NEXT: [[TMP24:%.*]] = load double, double* [[ARRAYIDX20]], align 8, !llvm.access.group !35
// CHECK6-NEXT: [[ADD21:%.*]] = fadd double [[TMP24]], 1.000000e+00
// CHECK6-NEXT: store double [[ADD21]], double* [[ARRAYIDX20]], align 8, !llvm.access.group !35
// CHECK6-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
// CHECK6-NEXT: [[TMP25:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group !35
// CHECK6-NEXT: [[ADD22:%.*]] = add nsw i64 [[TMP25]], 1
// CHECK6-NEXT: store i64 [[ADD22]], i64* [[X]], align 8, !llvm.access.group !35
// CHECK6-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
// CHECK6-NEXT: [[TMP26:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group !35
// CHECK6-NEXT: [[CONV23:%.*]] = sext i8 [[TMP26]] to i32
// CHECK6-NEXT: [[ADD24:%.*]] = add nsw i32 [[CONV23]], 1
// CHECK6-NEXT: [[CONV25:%.*]] = trunc i32 [[ADD24]] to i8
// CHECK6-NEXT: store i8 [[CONV25]], i8* [[Y]], align 8, !llvm.access.group !35
// CHECK6-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK6: omp.body.continue:
// CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK6: omp.inner.for.inc:
// CHECK6-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
// CHECK6-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP27]], 1
// CHECK6-NEXT: store i32 [[ADD26]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]]
// CHECK6: omp.inner.for.end:
// CHECK6-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK6: omp.dispatch.inc:
// CHECK6-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK6-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK6-NEXT: [[ADD27:%.*]] = add nsw i32 [[TMP28]], [[TMP29]]
// CHECK6-NEXT: store i32 [[ADD27]], i32* [[DOTOMP_LB]], align 4
// CHECK6-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK6-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP30]], [[TMP31]]
// CHECK6-NEXT: store i32 [[ADD28]], i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK6: omp.dispatch.end:
// CHECK6-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]])
// CHECK6-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK6-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0
// CHECK6-NEXT: br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK6: .omp.final.then:
// CHECK6-NEXT: store i8 96, i8* [[IT]], align 1
// CHECK6-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK6: .omp.final.done:
// CHECK6-NEXT: ret void
//
//
// CHECK6-LABEL: define {{[^@]+}}@_Z3bari
// CHECK6-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8
// CHECK6-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK6-NEXT: store i32 0, i32* [[A]], align 4
// CHECK6-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK6-NEXT: [[CALL:%.*]] = call signext i32 @_Z3fooi(i32 signext [[TMP0]])
// CHECK6-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
// CHECK6-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK6-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK6-NEXT: [[CALL1:%.*]] = call signext i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 8 dereferenceable(8) [[S]], i32 signext [[TMP2]])
// CHECK6-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK6-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
// CHECK6-NEXT: store i32 [[ADD2]], i32* [[A]], align 4
// CHECK6-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK6-NEXT: [[CALL3:%.*]] = call signext i32 @_ZL7fstatici(i32 signext [[TMP4]])
// CHECK6-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4
// CHECK6-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
// CHECK6-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK6-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK6-NEXT: [[CALL5:%.*]] = call signext i32 @_Z9ftemplateIiET_i(i32 signext [[TMP6]])
// CHECK6-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK6-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
// CHECK6-NEXT: store i32 [[ADD6]], i32* [[A]], align 4
// CHECK6-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK6-NEXT: ret i32 [[TMP8]]
//
//
// CHECK6-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
// CHECK6-SAME: (%struct.S1* nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 signext [[N:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK6-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[B:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK6-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
// CHECK6-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [6 x i8*], align 8
// CHECK6-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [6 x i8*], align 8
// CHECK6-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [6 x i8*], align 8
// CHECK6-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [6 x i64], align 8
// CHECK6-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK6-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK6-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK6-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK6-NEXT: store i32 [[ADD]], i32* [[B]], align 4
// CHECK6-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK6-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK6-NEXT: [[TMP3:%.*]] = call i8* @llvm.stacksave()
// CHECK6-NEXT: store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
// CHECK6-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]]
// CHECK6-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2
// CHECK6-NEXT: store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
// CHECK6-NEXT: [[TMP5:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK6-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 60
// CHECK6-NEXT: [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8
// CHECK6-NEXT: store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK6-NEXT: [[TMP6:%.*]] = load i32, i32* [[B]], align 4
// CHECK6-NEXT: [[CONV:%.*]] = bitcast i64* [[B_CASTED]] to i32*
// CHECK6-NEXT: store i32 [[TMP6]], i32* [[CONV]], align 4
// CHECK6-NEXT: [[TMP7:%.*]] = load i64, i64* [[B_CASTED]], align 8
// CHECK6-NEXT: [[TMP8:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK6-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP8]] to i1
// CHECK6-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i8*
// CHECK6-NEXT: [[FROMBOOL3:%.*]] = zext i1 [[TOBOOL]] to i8
// CHECK6-NEXT: store i8 [[FROMBOOL3]], i8* [[CONV2]], align 1
// CHECK6-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK6-NEXT: [[TMP10:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK6-NEXT: [[TOBOOL4:%.*]] = trunc i8 [[TMP10]] to i1
// CHECK6-NEXT: br i1 [[TOBOOL4]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK6: omp_if.then:
// CHECK6-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK6-NEXT: [[TMP11:%.*]] = mul nuw i64 2, [[TMP2]]
// CHECK6-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 2
// CHECK6-NEXT: [[TMP13:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK6-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to %struct.S1**
// CHECK6-NEXT: store %struct.S1* [[THIS1]], %struct.S1** [[TMP14]], align 8
// CHECK6-NEXT: [[TMP15:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK6-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to double**
// CHECK6-NEXT: store double* [[A]], double** [[TMP16]], align 8
// CHECK6-NEXT: [[TMP17:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK6-NEXT: store i64 8, i64* [[TMP17]], align 8
// CHECK6-NEXT: [[TMP18:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK6-NEXT: store i8* null, i8** [[TMP18]], align 8
// CHECK6-NEXT: [[TMP19:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK6-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64*
// CHECK6-NEXT: store i64 [[TMP7]], i64* [[TMP20]], align 8
// CHECK6-NEXT: [[TMP21:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK6-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i64*
// CHECK6-NEXT: store i64 [[TMP7]], i64* [[TMP22]], align 8
// CHECK6-NEXT: [[TMP23:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
// CHECK6-NEXT: store i64 4, i64* [[TMP23]], align 8
// CHECK6-NEXT: [[TMP24:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK6-NEXT: store i8* null, i8** [[TMP24]], align 8
// CHECK6-NEXT: [[TMP25:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK6-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i64*
// CHECK6-NEXT: store i64 2, i64* [[TMP26]], align 8
// CHECK6-NEXT: [[TMP27:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK6-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64*
// CHECK6-NEXT: store i64 2, i64* [[TMP28]], align 8
// CHECK6-NEXT: [[TMP29:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
// CHECK6-NEXT: store i64 8, i64* [[TMP29]], align 8
// CHECK6-NEXT: [[TMP30:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK6-NEXT: store i8* null, i8** [[TMP30]], align 8
// CHECK6-NEXT: [[TMP31:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK6-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i64*
// CHECK6-NEXT: store i64 [[TMP2]], i64* [[TMP32]], align 8
// CHECK6-NEXT: [[TMP33:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK6-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i64*
// CHECK6-NEXT: store i64 [[TMP2]], i64* [[TMP34]], align 8
// CHECK6-NEXT: [[TMP35:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
// CHECK6-NEXT: store i64 8, i64* [[TMP35]], align 8
// CHECK6-NEXT: [[TMP36:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
// CHECK6-NEXT: store i8* null, i8** [[TMP36]], align 8
// CHECK6-NEXT: [[TMP37:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
// CHECK6-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i16**
// CHECK6-NEXT: store i16* [[VLA]], i16** [[TMP38]], align 8
// CHECK6-NEXT: [[TMP39:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
// CHECK6-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i16**
// CHECK6-NEXT: store i16* [[VLA]], i16** [[TMP40]], align 8
// CHECK6-NEXT: [[TMP41:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
// CHECK6-NEXT: store i64 [[TMP12]], i64* [[TMP41]], align 8
// CHECK6-NEXT: [[TMP42:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4
// CHECK6-NEXT: store i8* null, i8** [[TMP42]], align 8
// CHECK6-NEXT: [[TMP43:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5
// CHECK6-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i64*
// CHECK6-NEXT: store i64 [[TMP9]], i64* [[TMP44]], align 8
// CHECK6-NEXT: [[TMP45:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5
// CHECK6-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i64*
// CHECK6-NEXT: store i64 [[TMP9]], i64* [[TMP46]], align 8
// CHECK6-NEXT: [[TMP47:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5
// CHECK6-NEXT: store i64 1, i64* [[TMP47]], align 8
// CHECK6-NEXT: [[TMP48:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 5
// CHECK6-NEXT: store i8* null, i8** [[TMP48]], align 8
// CHECK6-NEXT: [[TMP49:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK6-NEXT: [[TMP50:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK6-NEXT: [[TMP51:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK6-NEXT: [[TMP52:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK6-NEXT: [[TOBOOL5:%.*]] = trunc i8 [[TMP52]] to i1
// CHECK6-NEXT: [[TMP53:%.*]] = select i1 [[TOBOOL5]], i32 0, i32 1
// CHECK6-NEXT: [[TMP54:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214.region_id, i32 6, i8** [[TMP49]], i8** [[TMP50]], i64* [[TMP51]], i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 1, i32 [[TMP53]])
// CHECK6-NEXT: [[TMP55:%.*]] = icmp ne i32 [[TMP54]], 0
// CHECK6-NEXT: br i1 [[TMP55]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK6: omp_offload.failed:
// CHECK6-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214(%struct.S1* [[THIS1]], i64 [[TMP7]], i64 2, i64 [[TMP2]], i16* [[VLA]], i64 [[TMP9]]) #[[ATTR4]]
// CHECK6-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK6: omp_offload.cont:
// CHECK6-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK6: omp_if.else:
// CHECK6-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214(%struct.S1* [[THIS1]], i64 [[TMP7]], i64 2, i64 [[TMP2]], i16* [[VLA]], i64 [[TMP9]]) #[[ATTR4]]
// CHECK6-NEXT: br label [[OMP_IF_END]]
// CHECK6: omp_if.end:
// CHECK6-NEXT: [[TMP56:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK6-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP56]]
// CHECK6-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
// CHECK6-NEXT: [[TMP57:%.*]] = load i16, i16* [[ARRAYIDX6]], align 2
// CHECK6-NEXT: [[CONV7:%.*]] = sext i16 [[TMP57]] to i32
// CHECK6-NEXT: [[TMP58:%.*]] = load i32, i32* [[B]], align 4
// CHECK6-NEXT: [[ADD8:%.*]] = add nsw i32 [[CONV7]], [[TMP58]]
// CHECK6-NEXT: [[TMP59:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK6-NEXT: call void @llvm.stackrestore(i8* [[TMP59]])
// CHECK6-NEXT: ret i32 [[ADD8]]
//
//
// CHECK6-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK6-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK6-NEXT: [[AAA:%.*]] = alloca i8, align 1
// CHECK6-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK6-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[AAA_CASTED:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 8
// CHECK6-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 8
// CHECK6-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 8
// CHECK6-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK6-NEXT: store i32 0, i32* [[A]], align 4
// CHECK6-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK6-NEXT: store i8 0, i8* [[AAA]], align 1
// CHECK6-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK6-NEXT: [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK6-NEXT: store i32 [[TMP0]], i32* [[CONV]], align 4
// CHECK6-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK6-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
// CHECK6-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK6-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2
// CHECK6-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK6-NEXT: [[TMP4:%.*]] = load i8, i8* [[AAA]], align 1
// CHECK6-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
// CHECK6-NEXT: store i8 [[TMP4]], i8* [[CONV2]], align 1
// CHECK6-NEXT: [[TMP5:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
// CHECK6-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK6-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 50
// CHECK6-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK6: omp_if.then:
// CHECK6-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK6-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64*
// CHECK6-NEXT: store i64 [[TMP1]], i64* [[TMP8]], align 8
// CHECK6-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK6-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i64*
// CHECK6-NEXT: store i64 [[TMP1]], i64* [[TMP10]], align 8
// CHECK6-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK6-NEXT: store i8* null, i8** [[TMP11]], align 8
// CHECK6-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK6-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64*
// CHECK6-NEXT: store i64 [[TMP3]], i64* [[TMP13]], align 8
// CHECK6-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK6-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64*
// CHECK6-NEXT: store i64 [[TMP3]], i64* [[TMP15]], align 8
// CHECK6-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK6-NEXT: store i8* null, i8** [[TMP16]], align 8
// CHECK6-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK6-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64*
// CHECK6-NEXT: store i64 [[TMP5]], i64* [[TMP18]], align 8
// CHECK6-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK6-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64*
// CHECK6-NEXT: store i64 [[TMP5]], i64* [[TMP20]], align 8
// CHECK6-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK6-NEXT: store i8* null, i8** [[TMP21]], align 8
// CHECK6-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK6-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to [10 x i32]**
// CHECK6-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP23]], align 8
// CHECK6-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK6-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to [10 x i32]**
// CHECK6-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP25]], align 8
// CHECK6-NEXT: [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
// CHECK6-NEXT: store i8* null, i8** [[TMP26]], align 8
// CHECK6-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK6-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK6-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK6-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
// CHECK6-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK6: omp_offload.failed:
// CHECK6-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK6-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK6: omp_offload.cont:
// CHECK6-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK6: omp_if.else:
// CHECK6-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK6-NEXT: br label [[OMP_IF_END]]
// CHECK6: omp_if.end:
// CHECK6-NEXT: [[TMP31:%.*]] = load i32, i32* [[A]], align 4
// CHECK6-NEXT: ret i32 [[TMP31]]
//
//
// CHECK6-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK6-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK6-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK6-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK6-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK6-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
// CHECK6-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK6-NEXT: store i32 0, i32* [[A]], align 4
// CHECK6-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK6-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK6-NEXT: [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK6-NEXT: store i32 [[TMP0]], i32* [[CONV]], align 4
// CHECK6-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK6-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
// CHECK6-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK6-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2
// CHECK6-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK6-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK6-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40
// CHECK6-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK6: omp_if.then:
// CHECK6-NEXT: [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK6-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i64*
// CHECK6-NEXT: store i64 [[TMP1]], i64* [[TMP6]], align 8
// CHECK6-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK6-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64*
// CHECK6-NEXT: store i64 [[TMP1]], i64* [[TMP8]], align 8
// CHECK6-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK6-NEXT: store i8* null, i8** [[TMP9]], align 8
// CHECK6-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK6-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i64*
// CHECK6-NEXT: store i64 [[TMP3]], i64* [[TMP11]], align 8
// CHECK6-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK6-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64*
// CHECK6-NEXT: store i64 [[TMP3]], i64* [[TMP13]], align 8
// CHECK6-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK6-NEXT: store i8* null, i8** [[TMP14]], align 8
// CHECK6-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK6-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to [10 x i32]**
// CHECK6-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP16]], align 8
// CHECK6-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK6-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [10 x i32]**
// CHECK6-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP18]], align 8
// CHECK6-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK6-NEXT: store i8* null, i8** [[TMP19]], align 8
// CHECK6-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK6-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK6-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.15, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK6-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
// CHECK6-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK6: omp_offload.failed:
// CHECK6-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178(i64 [[TMP1]], i64 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK6-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK6: omp_offload.cont:
// CHECK6-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK6: omp_if.else:
// CHECK6-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178(i64 [[TMP1]], i64 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK6-NEXT: br label [[OMP_IF_END]]
// CHECK6: omp_if.end:
// CHECK6-NEXT: [[TMP24:%.*]] = load i32, i32* [[A]], align 4
// CHECK6-NEXT: ret i32 [[TMP24]]
//
//
// CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214
// CHECK6-SAME: (%struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK6-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 8
// CHECK6-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
// CHECK6-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK6-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8
// CHECK6-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK6-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK6-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8
// CHECK6-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK6-NEXT: [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK6-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
// CHECK6-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK6-NEXT: [[TMP3:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK6-NEXT: [[TMP4:%.*]] = load i16*, i16** [[C_ADDR]], align 8
// CHECK6-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i8*
// CHECK6-NEXT: [[TMP5:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK6-NEXT: [[CONV4:%.*]] = bitcast i64* [[B_CASTED]] to i32*
// CHECK6-NEXT: store i32 [[TMP5]], i32* [[CONV4]], align 4
// CHECK6-NEXT: [[TMP6:%.*]] = load i64, i64* [[B_CASTED]], align 8
// CHECK6-NEXT: [[TMP7:%.*]] = load i8, i8* [[CONV3]], align 8
// CHECK6-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP7]] to i1
// CHECK6-NEXT: [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i8*
// CHECK6-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8
// CHECK6-NEXT: store i8 [[FROMBOOL]], i8* [[CONV5]], align 1
// CHECK6-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK6-NEXT: [[TMP9:%.*]] = load i8, i8* [[CONV3]], align 8
// CHECK6-NEXT: [[TOBOOL6:%.*]] = trunc i8 [[TMP9]] to i1
// CHECK6-NEXT: br i1 [[TOBOOL6]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK6: omp_if.then:
// CHECK6-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*, i64)* @.omp_outlined..9 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]], i64 [[TMP6]], i64 [[TMP2]], i64 [[TMP3]], i16* [[TMP4]], i64 [[TMP8]])
// CHECK6-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK6: omp_if.else:
// CHECK6-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
// CHECK6-NEXT: store i32 [[TMP0]], i32* [[DOTTHREADID_TEMP_]], align 4
// CHECK6-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4
// CHECK6-NEXT: call void @.omp_outlined..9(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTBOUND_ZERO_ADDR]], %struct.S1* [[TMP1]], i64 [[TMP6]], i64 [[TMP2]], i64 [[TMP3]], i16* [[TMP4]], i64 [[TMP8]]) #[[ATTR4]]
// CHECK6-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
// CHECK6-NEXT: br label [[OMP_IF_END]]
// CHECK6: omp_if.end:
// CHECK6-NEXT: ret void
//
//
// CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..9
// CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK6-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK6-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 8
// CHECK6-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[TMP:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK6-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK6-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK6-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK6-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8
// CHECK6-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK6-NEXT: store i64 [[VLA1]], i64*
[[VLA_ADDR2]], align 8 9594 // CHECK6-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8 9595 // CHECK6-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 9596 // CHECK6-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 9597 // CHECK6-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32* 9598 // CHECK6-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 9599 // CHECK6-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8 9600 // CHECK6-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8 9601 // CHECK6-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i8* 9602 // CHECK6-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 9603 // CHECK6-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8 9604 // CHECK6-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 9605 // CHECK6-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 9606 // CHECK6-NEXT: [[TMP4:%.*]] = load i8, i8* [[CONV3]], align 8 9607 // CHECK6-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP4]] to i1 9608 // CHECK6-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 9609 // CHECK6: omp_if.then: 9610 // CHECK6-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 9611 // CHECK6-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4 9612 // CHECK6-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) 9613 // CHECK6-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 9614 // CHECK6-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP7]], 3 9615 // CHECK6-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 9616 // CHECK6: cond.true: 9617 // CHECK6-NEXT: br label [[COND_END:%.*]] 9618 // CHECK6: cond.false: 9619 // CHECK6-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 9620 // CHECK6-NEXT: br label [[COND_END]] 9621 // CHECK6: cond.end: 9622 // CHECK6-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ] 9623 // CHECK6-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 9624 // CHECK6-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 9625 // CHECK6-NEXT: store i64 [[TMP9]], i64* [[DOTOMP_IV]], align 8 9626 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 9627 // CHECK6: omp.inner.for.cond: 9628 // CHECK6-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !38 9629 // CHECK6-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !38 9630 // CHECK6-NEXT: [[CMP4:%.*]] = icmp ule i64 [[TMP10]], [[TMP11]] 9631 // CHECK6-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 9632 // CHECK6: omp.inner.for.body: 9633 // CHECK6-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !38 9634 // CHECK6-NEXT: [[MUL:%.*]] = mul i64 [[TMP12]], 400 9635 // CHECK6-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] 9636 // CHECK6-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !38 9637 // CHECK6-NEXT: [[TMP13:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !38 9638 // CHECK6-NEXT: [[CONV5:%.*]] = sitofp i32 [[TMP13]] to double 9639 // CHECK6-NEXT: [[ADD:%.*]] = fadd double [[CONV5]], 1.500000e+00 9640 // CHECK6-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0 9641 // CHECK6-NEXT: store double [[ADD]], double* [[A]], align 8, !nontemporal !39, !llvm.access.group !38 9642 // 
CHECK6-NEXT: [[A6:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0 9643 // CHECK6-NEXT: [[TMP14:%.*]] = load double, double* [[A6]], align 8, !nontemporal !39, !llvm.access.group !38 9644 // CHECK6-NEXT: [[INC:%.*]] = fadd double [[TMP14]], 1.000000e+00 9645 // CHECK6-NEXT: store double [[INC]], double* [[A6]], align 8, !nontemporal !39, !llvm.access.group !38 9646 // CHECK6-NEXT: [[CONV7:%.*]] = fptosi double [[INC]] to i16 9647 // CHECK6-NEXT: [[TMP15:%.*]] = mul nsw i64 1, [[TMP2]] 9648 // CHECK6-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP15]] 9649 // CHECK6-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1 9650 // CHECK6-NEXT: store i16 [[CONV7]], i16* [[ARRAYIDX8]], align 2, !llvm.access.group !38 9651 // CHECK6-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 9652 // CHECK6: omp.body.continue: 9653 // CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 9654 // CHECK6: omp.inner.for.inc: 9655 // CHECK6-NEXT: [[TMP16:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !38 9656 // CHECK6-NEXT: [[ADD9:%.*]] = add i64 [[TMP16]], 1 9657 // CHECK6-NEXT: store i64 [[ADD9]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !38 9658 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP40:![0-9]+]] 9659 // CHECK6: omp.inner.for.end: 9660 // CHECK6-NEXT: br label [[OMP_IF_END:%.*]] 9661 // CHECK6: omp_if.else: 9662 // CHECK6-NEXT: [[TMP17:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 9663 // CHECK6-NEXT: [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4 9664 // CHECK6-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP18]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) 9665 // CHECK6-NEXT: [[TMP19:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 9666 // CHECK6-NEXT: [[CMP10:%.*]] = icmp ugt i64 [[TMP19]], 3 9667 // CHECK6-NEXT: br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]] 9668 // CHECK6: cond.true11: 9669 // CHECK6-NEXT: br label [[COND_END13:%.*]] 9670 // CHECK6: cond.false12: 9671 // CHECK6-NEXT: [[TMP20:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 9672 // CHECK6-NEXT: br label [[COND_END13]] 9673 // CHECK6: cond.end13: 9674 // CHECK6-NEXT: [[COND14:%.*]] = phi i64 [ 3, [[COND_TRUE11]] ], [ [[TMP20]], [[COND_FALSE12]] ] 9675 // CHECK6-NEXT: store i64 [[COND14]], i64* [[DOTOMP_UB]], align 8 9676 // CHECK6-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 9677 // CHECK6-NEXT: store i64 [[TMP21]], i64* [[DOTOMP_IV]], align 8 9678 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND15:%.*]] 9679 // CHECK6: omp.inner.for.cond15: 9680 // CHECK6-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 9681 // CHECK6-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 9682 // CHECK6-NEXT: [[CMP16:%.*]] = icmp ule i64 [[TMP22]], [[TMP23]] 9683 // CHECK6-NEXT: br i1 [[CMP16]], label [[OMP_INNER_FOR_BODY17:%.*]], label [[OMP_INNER_FOR_END31:%.*]] 9684 // CHECK6: omp.inner.for.body17: 9685 // CHECK6-NEXT: [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 9686 // CHECK6-NEXT: [[MUL18:%.*]] = mul i64 [[TMP24]], 400 9687 // CHECK6-NEXT: [[SUB19:%.*]] = sub i64 2000, [[MUL18]] 9688 // CHECK6-NEXT: store i64 [[SUB19]], i64* [[IT]], align 8 9689 // CHECK6-NEXT: [[TMP25:%.*]] = load i32, i32* [[CONV]], align 8 9690 // CHECK6-NEXT: [[CONV20:%.*]] = sitofp i32 [[TMP25]] to double 9691 // CHECK6-NEXT: [[ADD21:%.*]] = fadd double [[CONV20]], 1.500000e+00 9692 // 
CHECK6-NEXT: [[A22:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0 9693 // CHECK6-NEXT: store double [[ADD21]], double* [[A22]], align 8 9694 // CHECK6-NEXT: [[A23:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0 9695 // CHECK6-NEXT: [[TMP26:%.*]] = load double, double* [[A23]], align 8 9696 // CHECK6-NEXT: [[INC24:%.*]] = fadd double [[TMP26]], 1.000000e+00 9697 // CHECK6-NEXT: store double [[INC24]], double* [[A23]], align 8 9698 // CHECK6-NEXT: [[CONV25:%.*]] = fptosi double [[INC24]] to i16 9699 // CHECK6-NEXT: [[TMP27:%.*]] = mul nsw i64 1, [[TMP2]] 9700 // CHECK6-NEXT: [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP27]] 9701 // CHECK6-NEXT: [[ARRAYIDX27:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX26]], i64 1 9702 // CHECK6-NEXT: store i16 [[CONV25]], i16* [[ARRAYIDX27]], align 2 9703 // CHECK6-NEXT: br label [[OMP_BODY_CONTINUE28:%.*]] 9704 // CHECK6: omp.body.continue28: 9705 // CHECK6-NEXT: br label [[OMP_INNER_FOR_INC29:%.*]] 9706 // CHECK6: omp.inner.for.inc29: 9707 // CHECK6-NEXT: [[TMP28:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 9708 // CHECK6-NEXT: [[ADD30:%.*]] = add i64 [[TMP28]], 1 9709 // CHECK6-NEXT: store i64 [[ADD30]], i64* [[DOTOMP_IV]], align 8 9710 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND15]], !llvm.loop [[LOOP42:![0-9]+]] 9711 // CHECK6: omp.inner.for.end31: 9712 // CHECK6-NEXT: br label [[OMP_IF_END]] 9713 // CHECK6: omp_if.end: 9714 // CHECK6-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 9715 // CHECK6: omp.loop.exit: 9716 // CHECK6-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 9717 // CHECK6-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4 9718 // CHECK6-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]]) 9719 // CHECK6-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 9720 // CHECK6-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 9721 // CHECK6-NEXT: br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 9722 // CHECK6: .omp.final.then: 9723 // CHECK6-NEXT: store i64 400, i64* [[IT]], align 8 9724 // CHECK6-NEXT: br label [[DOTOMP_FINAL_DONE]] 9725 // CHECK6: .omp.final.done: 9726 // CHECK6-NEXT: ret void 9727 // 9728 // 9729 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195 9730 // CHECK6-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] { 9731 // CHECK6-NEXT: entry: 9732 // CHECK6-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 9733 // CHECK6-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 9734 // CHECK6-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8 9735 // CHECK6-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8 9736 // CHECK6-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 9737 // CHECK6-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 9738 // CHECK6-NEXT: [[AAA_CASTED:%.*]] = alloca i64, align 8 9739 // CHECK6-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 9740 // CHECK6-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 9741 // CHECK6-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8 9742 // CHECK6-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8 9743 // CHECK6-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 9744 // CHECK6-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 9745 // CHECK6-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8* 9746 // CHECK6-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8 
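// The checks below verify that each scalar firstprivate (i32 a, i16 aa, i8 aaa) is re-packed into a fresh i64 "casted" slot before being forwarded, together with the array pointer, to @__kmpc_fork_call.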
// CHECK6-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK6-NEXT: [[CONV3:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK6-NEXT: store i32 [[TMP1]], i32* [[CONV3]], align 4
// CHECK6-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK6-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK6-NEXT: [[CONV4:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK6-NEXT: store i16 [[TMP3]], i16* [[CONV4]], align 2
// CHECK6-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK6-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV2]], align 8
// CHECK6-NEXT: [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
// CHECK6-NEXT: store i8 [[TMP5]], i8* [[CONV5]], align 1
// CHECK6-NEXT: [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
// CHECK6-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]])
// CHECK6-NEXT: ret void
//
//
// CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..11
// CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK6-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK6-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK6-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK6-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK6-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK6-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK6-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
// CHECK6-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK6-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK6-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK6-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
// CHECK6-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK6-NEXT: ret void
//
//
// CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178
// CHECK6-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK6-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK6-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK6-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK6-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK6-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK6-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK6-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK6-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK6-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK6-NEXT: store i32 [[TMP1]], i32* [[CONV2]], align 4
// CHECK6-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK6-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK6-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK6-NEXT: store i16 [[TMP3]], i16* [[CONV3]], align 2
// CHECK6-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK6-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]])
// CHECK6-NEXT: ret void
//
//
// CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..14
// CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK6-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK6-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[TMP:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[I:%.*]] = alloca i64, align 8
// CHECK6-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK6-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK6-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK6-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK6-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK6-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK6-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK6-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK6-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK6-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8
// CHECK6-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK6-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK6-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK6-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK6-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK6-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK6-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6
// CHECK6-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK6: cond.true:
// CHECK6-NEXT: br label [[COND_END:%.*]]
// CHECK6: cond.false:
// CHECK6-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK6-NEXT: br label [[COND_END]]
// CHECK6: cond.end:
// CHECK6-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK6-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK6-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK6-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK6: omp.inner.for.cond:
// CHECK6-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !44
// CHECK6-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !44
// CHECK6-NEXT: [[CMP2:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]]
// CHECK6-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK6: omp.inner.for.body:
// CHECK6-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !44
// CHECK6-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3
// CHECK6-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
// CHECK6-NEXT: store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !44
// CHECK6-NEXT: [[TMP9:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !44
// CHECK6-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK6-NEXT: store i32 [[ADD3]], i32* [[CONV]], align 8, !llvm.access.group !44
// CHECK6-NEXT: [[TMP10:%.*]] = load i16, i16* [[CONV1]], align 8, !llvm.access.group !44
// CHECK6-NEXT: [[CONV4:%.*]] = sext i16 [[TMP10]] to i32
// CHECK6-NEXT: [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
// CHECK6-NEXT: [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
// CHECK6-NEXT: store i16 [[CONV6]], i16* [[CONV1]], align 8, !llvm.access.group !44
// CHECK6-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
// CHECK6-NEXT: [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !44
// CHECK6-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK6-NEXT: store i32 [[ADD7]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !44
// CHECK6-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK6: omp.body.continue:
// CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK6: omp.inner.for.inc:
// CHECK6-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !44
// CHECK6-NEXT: [[ADD8:%.*]] = add nsw i64 [[TMP12]], 1
// CHECK6-NEXT: store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !44
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP45:![0-9]+]]
// CHECK6: omp.inner.for.end:
// CHECK6-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK6: omp.loop.exit:
// CHECK6-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK6-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK6-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
// CHECK6-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK6: .omp.final.then:
// CHECK6-NEXT: store i64 11, i64* [[I]], align 8
// CHECK6-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK6: .omp.final.done:
// CHECK6-NEXT: ret void
//
//
// CHECK6-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK6-SAME: () #[[ATTR7:[0-9]+]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK6-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@_Z7get_valv
// CHECK7-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: ret i64 0
//
//
// CHECK7-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK7-SAME: (i32 [[N:%.*]]) #[[ATTR0]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK7-NEXT: [[B:%.*]] = alloca [10 x float], align 4
// CHECK7-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4
// CHECK7-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK7-NEXT: [[__VLA_EXPR1:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4
// CHECK7-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
// CHECK7-NEXT: [[K:%.*]] = alloca i64, align 8
// CHECK7-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[LIN:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[LIN_CASTED:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[A_CASTED2:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4
// CHECK7-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4
// CHECK7-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4
// CHECK7-NEXT: [[A_CASTED3:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[AA_CASTED4:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOFFLOAD_BASEPTRS6:%.*]] = alloca [2 x i8*], align 4
// CHECK7-NEXT: [[DOTOFFLOAD_PTRS7:%.*]] = alloca [2 x i8*], align 4
// CHECK7-NEXT: [[DOTOFFLOAD_MAPPERS8:%.*]] = alloca [2 x i8*], align 4
// CHECK7-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[A_CASTED11:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOFFLOAD_BASEPTRS14:%.*]] = alloca [10 x i8*], align 4
// CHECK7-NEXT: [[DOTOFFLOAD_PTRS15:%.*]] = alloca [10 x i8*], align 4
// CHECK7-NEXT: [[DOTOFFLOAD_MAPPERS16:%.*]] = alloca [10 x i8*], align 4
// CHECK7-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 4
// CHECK7-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
// CHECK7-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK7-NEXT: store i32 0, i32* [[A]], align 4
// CHECK7-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK7-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK7-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
// CHECK7-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP1]], align 4
// CHECK7-NEXT: store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
// CHECK7-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK7-NEXT: [[TMP4:%.*]] = mul nuw i32 5, [[TMP3]]
// CHECK7-NEXT: [[VLA1:%.*]] = alloca double, i32 [[TMP4]], align 8
// CHECK7-NEXT: store i32 [[TMP3]], i32* [[__VLA_EXPR1]], align 4
// CHECK7-NEXT: [[TMP5:%.*]] = call i8* @__kmpc_omp_target_task_alloc(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i32 20, i32 1, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*), i64 -1)
// CHECK7-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP5]] to %struct.kmp_task_t_with_privates*
// CHECK7-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP6]], i32 0, i32 0
// CHECK7-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i8* [[TMP5]])
// CHECK7-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv()
// CHECK7-NEXT: store i64 [[CALL]], i64* [[K]], align 8
// CHECK7-NEXT: [[TMP9:%.*]] = load i32, i32* [[A]], align 4
// CHECK7-NEXT: store i32 [[TMP9]], i32* [[A_CASTED]], align 4
// CHECK7-NEXT: [[TMP10:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101(i32 [[TMP10]], i64* [[K]]) #[[ATTR4:[0-9]+]]
// CHECK7-NEXT: store i32 12, i32* [[LIN]], align 4
// CHECK7-NEXT: [[TMP11:%.*]] = load i16, i16* [[AA]], align 2
// CHECK7-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK7-NEXT: store i16 [[TMP11]], i16* [[CONV]], align 2
// CHECK7-NEXT: [[TMP12:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK7-NEXT: [[TMP13:%.*]] = load i32, i32* [[LIN]], align 4
// CHECK7-NEXT: store i32 [[TMP13]], i32* [[LIN_CASTED]], align 4
// CHECK7-NEXT: [[TMP14:%.*]] = load i32, i32* [[LIN_CASTED]], align 4
// CHECK7-NEXT: [[TMP15:%.*]] = load i32, i32* [[A]], align 4
// CHECK7-NEXT: store i32 [[TMP15]], i32* [[A_CASTED2]], align 4
// CHECK7-NEXT: [[TMP16:%.*]] = load i32, i32* [[A_CASTED2]], align 4
// CHECK7-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK7-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32*
// CHECK7-NEXT: store i32 [[TMP12]], i32* [[TMP18]], align 4
// CHECK7-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK7-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32*
// CHECK7-NEXT: store i32 [[TMP12]], i32* [[TMP20]], align 4
// CHECK7-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK7-NEXT: store i8* null, i8** [[TMP21]], align 4
// CHECK7-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK7-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32*
// CHECK7-NEXT: store i32 [[TMP14]], i32* [[TMP23]], align 4
// CHECK7-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK7-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32*
// CHECK7-NEXT: store i32 [[TMP14]], i32* [[TMP25]], align 4
// CHECK7-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK7-NEXT: store i8* null, i8** [[TMP26]], align 4
// CHECK7-NEXT: [[TMP27:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK7-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32*
// CHECK7-NEXT: store i32 [[TMP16]], i32* [[TMP28]], align 4
// CHECK7-NEXT: [[TMP29:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK7-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i32*
// CHECK7-NEXT: store i32 [[TMP16]], i32* [[TMP30]], align 4
// CHECK7-NEXT: [[TMP31:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
// CHECK7-NEXT: store i8* null, i8** [[TMP31]], align 4
// CHECK7-NEXT: [[TMP32:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK7-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK7-NEXT: [[TMP34:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108.region_id, i32 3, i8** [[TMP32]], i8** [[TMP33]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK7-NEXT: [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
// CHECK7-NEXT: br i1 [[TMP35]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK7: omp_offload.failed:
// CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108(i32 [[TMP12]], i32 [[TMP14]], i32 [[TMP16]]) #[[ATTR4]]
// CHECK7-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK7: omp_offload.cont:
// CHECK7-NEXT: [[TMP36:%.*]] = load i32, i32* [[A]], align 4
// CHECK7-NEXT: store i32 [[TMP36]], i32* [[A_CASTED3]], align 4
// CHECK7-NEXT: [[TMP37:%.*]] = load i32, i32* [[A_CASTED3]], align 4
// CHECK7-NEXT: [[TMP38:%.*]] = load i16, i16* [[AA]], align 2
// CHECK7-NEXT: [[CONV5:%.*]] = bitcast i32* [[AA_CASTED4]] to i16*
// CHECK7-NEXT: store i16 [[TMP38]], i16* [[CONV5]], align 2
// CHECK7-NEXT: [[TMP39:%.*]] = load i32, i32* [[AA_CASTED4]], align 4
// CHECK7-NEXT: [[TMP40:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK7-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP40]], 10
// CHECK7-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK7: omp_if.then:
// CHECK7-NEXT: [[TMP41:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 0
// CHECK7-NEXT: [[TMP42:%.*]] = bitcast i8** [[TMP41]] to i32*
// CHECK7-NEXT: store i32 [[TMP37]], i32* [[TMP42]], align 4
// CHECK7-NEXT: [[TMP43:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS7]], i32 0, i32 0
// CHECK7-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32*
// CHECK7-NEXT: store i32 [[TMP37]], i32* [[TMP44]], align 4
// CHECK7-NEXT: [[TMP45:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS8]], i32 0, i32 0
// CHECK7-NEXT: store i8* null, i8** [[TMP45]], align 4
// CHECK7-NEXT: [[TMP46:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 1
// CHECK7-NEXT: [[TMP47:%.*]] = bitcast i8** [[TMP46]] to i32*
// CHECK7-NEXT: store i32 [[TMP39]], i32* [[TMP47]], align 4
// CHECK7-NEXT: [[TMP48:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS7]], i32 0, i32 1
// CHECK7-NEXT: [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32*
// CHECK7-NEXT: store i32 [[TMP39]], i32* [[TMP49]], align 4
// CHECK7-NEXT: [[TMP50:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS8]], i32 0, i32 1
// CHECK7-NEXT: store i8* null, i8** [[TMP50]], align 4
// CHECK7-NEXT: [[TMP51:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 0
// CHECK7-NEXT: [[TMP52:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS7]], i32 0, i32 0
// CHECK7-NEXT: [[TMP53:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116.region_id, i32 2, i8** [[TMP51]], i8** [[TMP52]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK7-NEXT: [[TMP54:%.*]] = icmp ne i32 [[TMP53]], 0
// CHECK7-NEXT: br i1 [[TMP54]], label [[OMP_OFFLOAD_FAILED9:%.*]], label [[OMP_OFFLOAD_CONT10:%.*]]
// CHECK7: omp_offload.failed9:
// CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116(i32 [[TMP37]], i32 [[TMP39]]) #[[ATTR4]]
// CHECK7-NEXT: br label [[OMP_OFFLOAD_CONT10]]
// CHECK7: omp_offload.cont10:
// CHECK7-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK7: omp_if.else:
// CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116(i32 [[TMP37]], i32 [[TMP39]]) #[[ATTR4]]
// CHECK7-NEXT: br label [[OMP_IF_END]]
// CHECK7: omp_if.end:
// CHECK7-NEXT: [[TMP55:%.*]] = load i32, i32* [[A]], align 4
// CHECK7-NEXT: store i32 [[TMP55]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK7-NEXT: [[TMP56:%.*]] = load i32, i32* [[A]], align 4
// CHECK7-NEXT: store i32 [[TMP56]], i32* [[A_CASTED11]], align 4
// CHECK7-NEXT: [[TMP57:%.*]] = load i32, i32* [[A_CASTED11]], align 4
// CHECK7-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK7-NEXT: store i32 [[TMP58]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK7-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK7-NEXT: [[TMP60:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK7-NEXT: [[CMP12:%.*]] = icmp sgt i32 [[TMP60]], 20
// CHECK7-NEXT: br i1 [[CMP12]], label [[OMP_IF_THEN13:%.*]], label [[OMP_IF_ELSE19:%.*]]
// CHECK7: omp_if.then13:
// CHECK7-NEXT: [[TMP61:%.*]] = mul nuw i32 [[TMP1]], 4
// CHECK7-NEXT: [[TMP62:%.*]] = sext i32 [[TMP61]] to i64
// CHECK7-NEXT: [[TMP63:%.*]] = mul nuw i32 5, [[TMP3]]
// CHECK7-NEXT: [[TMP64:%.*]] = mul nuw i32 [[TMP63]], 8
// CHECK7-NEXT: [[TMP65:%.*]] = sext i32 [[TMP64]] to i64
// CHECK7-NEXT: [[TMP66:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 0
// CHECK7-NEXT: [[TMP67:%.*]] = bitcast i8** [[TMP66]] to i32*
// CHECK7-NEXT: store i32 [[TMP57]], i32* [[TMP67]], align 4
// CHECK7-NEXT: [[TMP68:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0
// CHECK7-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i32*
// CHECK7-NEXT: store i32 [[TMP57]], i32* [[TMP69]], align 4
// CHECK7-NEXT: [[TMP70:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK7-NEXT: store i64 4, i64* [[TMP70]], align 4
// CHECK7-NEXT: [[TMP71:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 0
// CHECK7-NEXT: store i8* null, i8** [[TMP71]], align 4
// CHECK7-NEXT: [[TMP72:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 1
// CHECK7-NEXT: [[TMP73:%.*]] = bitcast i8** [[TMP72]] to [10 x float]**
// CHECK7-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP73]], align 4
// CHECK7-NEXT: [[TMP74:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 1
// CHECK7-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to [10 x float]**
// CHECK7-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP75]], align 4
// CHECK7-NEXT: [[TMP76:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
// CHECK7-NEXT: store i64 40, i64* [[TMP76]], align 4
// CHECK7-NEXT: [[TMP77:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 1
// CHECK7-NEXT: store i8* null, i8** [[TMP77]], align 4
// CHECK7-NEXT: [[TMP78:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 2
// CHECK7-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32*
// CHECK7-NEXT: store i32 [[TMP1]], i32* [[TMP79]], align 4
// CHECK7-NEXT: [[TMP80:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 2
// CHECK7-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i32*
// CHECK7-NEXT: store i32 [[TMP1]], i32* [[TMP81]], align 4
// CHECK7-NEXT: [[TMP82:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
// CHECK7-NEXT: store i64 4, i64* [[TMP82]], align 4
// CHECK7-NEXT: [[TMP83:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 2
// CHECK7-NEXT: store i8* null, i8** [[TMP83]], align 4
// CHECK7-NEXT: [[TMP84:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 3
// CHECK7-NEXT: [[TMP85:%.*]] = bitcast i8** [[TMP84]] to float**
// CHECK7-NEXT: store float* [[VLA]], float** [[TMP85]], align 4
// CHECK7-NEXT: [[TMP86:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 3
// CHECK7-NEXT: [[TMP87:%.*]] = bitcast i8** [[TMP86]] to float**
// CHECK7-NEXT: store float* [[VLA]], float** [[TMP87]], align 4
// CHECK7-NEXT: [[TMP88:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
// CHECK7-NEXT: store i64 [[TMP62]], i64* [[TMP88]], align 4
// CHECK7-NEXT: [[TMP89:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 3
// CHECK7-NEXT: store i8* null, i8** [[TMP89]], align 4
// CHECK7-NEXT: [[TMP90:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 4
// CHECK7-NEXT: [[TMP91:%.*]] = bitcast i8** [[TMP90]] to [5 x [10 x double]]**
// CHECK7-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP91]], align 4
// CHECK7-NEXT: [[TMP92:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 4
// CHECK7-NEXT: [[TMP93:%.*]] = bitcast i8** [[TMP92]] to [5 x [10 x double]]**
// CHECK7-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP93]], align 4
// CHECK7-NEXT: [[TMP94:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
// CHECK7-NEXT: store i64 400, i64* [[TMP94]], align 4
// CHECK7-NEXT: [[TMP95:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 4
// CHECK7-NEXT: store i8* null, i8** [[TMP95]], align 4
// CHECK7-NEXT: [[TMP96:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 5
// CHECK7-NEXT: [[TMP97:%.*]] = bitcast i8** [[TMP96]] to i32*
// CHECK7-NEXT: store i32 5, i32* [[TMP97]], align 4
// CHECK7-NEXT: [[TMP98:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 5
// CHECK7-NEXT: [[TMP99:%.*]] = bitcast i8** [[TMP98]] to i32*
// CHECK7-NEXT: store i32 5, i32* [[TMP99]], align 4
// CHECK7-NEXT: [[TMP100:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5
// CHECK7-NEXT: store i64 4, i64* [[TMP100]], align 4
// CHECK7-NEXT: [[TMP101:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 5
// CHECK7-NEXT: store i8* null, i8** [[TMP101]], align 4
// CHECK7-NEXT: [[TMP102:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 6
// CHECK7-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to i32*
// CHECK7-NEXT: store i32 [[TMP3]], i32* [[TMP103]], align 4
// CHECK7-NEXT: [[TMP104:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 6
// CHECK7-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to i32*
// CHECK7-NEXT: store i32 [[TMP3]], i32* [[TMP105]], align 4
// CHECK7-NEXT: [[TMP106:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6
// CHECK7-NEXT: store i64 4, i64* [[TMP106]], align 4
// CHECK7-NEXT: [[TMP107:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 6
// CHECK7-NEXT: store i8* null, i8** [[TMP107]], align 4
// CHECK7-NEXT: [[TMP108:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 7
// CHECK7-NEXT: [[TMP109:%.*]] = bitcast i8** [[TMP108]] to double**
// CHECK7-NEXT: store double* [[VLA1]], double** [[TMP109]], align 4
// CHECK7-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 7
// CHECK7-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to double**
// CHECK7-NEXT: store double* [[VLA1]], double** [[TMP111]], align 4
// CHECK7-NEXT: [[TMP112:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7
// CHECK7-NEXT: store i64 [[TMP65]], i64* [[TMP112]], align 4
// CHECK7-NEXT: [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 7
// CHECK7-NEXT: store i8* null, i8** [[TMP113]], align 4
// CHECK7-NEXT: [[TMP114:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 8
// CHECK7-NEXT: [[TMP115:%.*]] = bitcast i8** [[TMP114]] to %struct.TT**
// CHECK7-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP115]], align 4
// CHECK7-NEXT: [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 8
// CHECK7-NEXT: [[TMP117:%.*]] = bitcast i8** [[TMP116]] to %struct.TT**
// CHECK7-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP117]], align 4
// CHECK7-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8
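// Slot 8 of the offload arrays passes %struct.TT* [[D]] by pointer; the matching [[DOTOFFLOAD_SIZES]] entry just below records its 12-byte transfer size.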
// CHECK7-NEXT: store i64 12, i64* [[TMP118]], align 4
// CHECK7-NEXT: [[TMP119:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 8
// CHECK7-NEXT: store i8* null, i8** [[TMP119]], align 4
// CHECK7-NEXT: [[TMP120:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 9
// CHECK7-NEXT: [[TMP121:%.*]] = bitcast i8** [[TMP120]] to i32*
// CHECK7-NEXT: store i32 [[TMP59]], i32* [[TMP121]], align 4
// CHECK7-NEXT: [[TMP122:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 9
// CHECK7-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to i32*
// CHECK7-NEXT: store i32 [[TMP59]], i32* [[TMP123]], align 4
// CHECK7-NEXT: [[TMP124:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 9
// CHECK7-NEXT: store i64 4, i64* [[TMP124]], align 4
// CHECK7-NEXT: [[TMP125:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 9
// CHECK7-NEXT: store i8* null, i8** [[TMP125]], align 4
// CHECK7-NEXT: [[TMP126:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 0
// CHECK7-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0
// CHECK7-NEXT: [[TMP128:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK7-NEXT: [[TMP129:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140.region_id, i32 10, i8** [[TMP126]], i8** [[TMP127]], i64* [[TMP128]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK7-NEXT: [[TMP130:%.*]] = icmp ne i32 [[TMP129]], 0
// CHECK7-NEXT: br i1 [[TMP130]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]]
// CHECK7: omp_offload.failed17:
// CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i32 [[TMP57]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP59]]) #[[ATTR4]]
// CHECK7-NEXT: br label [[OMP_OFFLOAD_CONT18]]
// CHECK7: omp_offload.cont18:
// CHECK7-NEXT: br label [[OMP_IF_END20:%.*]]
// CHECK7: omp_if.else19:
// CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i32 [[TMP57]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP59]]) #[[ATTR4]]
// CHECK7-NEXT: br label [[OMP_IF_END20]]
// CHECK7: omp_if.end20:
// CHECK7-NEXT: [[TMP131:%.*]] = load i32, i32* [[A]], align 4
// CHECK7-NEXT: [[TMP132:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK7-NEXT: call void @llvm.stackrestore(i8* [[TMP132]])
// CHECK7-NEXT: ret i32 [[TMP131]]
//
//
// CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96
// CHECK7-SAME: () #[[ATTR2:[0-9]+]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK7-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK7-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK7-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK7-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK7-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4
// CHECK7-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK7-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK7-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK7-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK7-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5
// CHECK7-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK7: cond.true:
// CHECK7-NEXT: br label [[COND_END:%.*]]
// CHECK7: cond.false:
// CHECK7-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK7-NEXT: br label [[COND_END]]
// CHECK7: cond.end:
// CHECK7-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK7-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK7-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK7-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK7: omp.inner.for.cond:
// CHECK7-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK7-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !11
// CHECK7-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK7-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK7: omp.inner.for.body:
// CHECK7-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
// CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]]
// CHECK7-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !11
// CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK7: omp.body.continue:
// CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK7: omp.inner.for.inc:
// CHECK7-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
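// Loop latch: the induction variable loaded above is incremented by 1, stored back, and the back-edge branch below is tagged with !llvm.loop [[LOOP12]].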
// CHECK7-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK7-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]]
// CHECK7: omp.inner.for.end:
// CHECK7-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK7: omp.loop.exit:
// CHECK7-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK7-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK7-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
// CHECK7-NEXT: br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK7: .omp.final.then:
// CHECK7-NEXT: store i32 33, i32* [[I]], align 4
// CHECK7-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK7: .omp.final.done:
// CHECK7-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@.omp_task_entry.
// CHECK7-SAME: (i32 [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias [[TMP1:%.*]]) #[[ATTR5:[0-9]+]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 4
// CHECK7-NEXT: [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 4
// CHECK7-NEXT: [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 4
// CHECK7-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 4
// CHECK7-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 4
// CHECK7-NEXT: store i32 [[TMP0]], i32* [[DOTADDR]], align 4
// CHECK7-NEXT: store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 4
// CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
// CHECK7-NEXT: [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 4
// CHECK7-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0
// CHECK7-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
// CHECK7-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
// CHECK7-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
// CHECK7-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
// CHECK7-NEXT: [[TMP9:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
// CHECK7-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META17:![0-9]+]])
// CHECK7-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META20:![0-9]+]])
// CHECK7-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META22:![0-9]+]])
// CHECK7-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META24:![0-9]+]])
// CHECK7-NEXT: store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !26
// CHECK7-NEXT: store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 4, !noalias !26
// CHECK7-NEXT: store i8* null, i8** [[DOTPRIVATES__ADDR_I]], align 4, !noalias !26
// CHECK7-NEXT: store void (i8*, ...)* null, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !26
// CHECK7-NEXT: store i8* [[TMP9]], i8** [[DOTTASK_T__ADDR_I]], align 4, !noalias !26
// CHECK7-NEXT: store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 4, !noalias !26
// CHECK7-NEXT: [[TMP10:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 4, !noalias !26
// CHECK7-NEXT: [[TMP11:%.*]] = call i32 @__tgt_target_teams_nowait_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 1, i32 0, i32 0, i8* null, i32 0, i8* null) #[[ATTR4]]
// CHECK7-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
// CHECK7-NEXT: br i1 [[TMP12]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]]
// CHECK7: omp_offload.failed.i:
// CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96() #[[ATTR4]]
// CHECK7-NEXT: br label [[DOTOMP_OUTLINED__1_EXIT]]
// CHECK7: .omp_outlined..1.exit:
// CHECK7-NEXT: ret i32 0
//
//
// CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101
// CHECK7-SAME: (i32 [[A:%.*]], i64* nonnull align 4 dereferenceable(8) [[K:%.*]]) #[[ATTR3]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[K_ADDR:%.*]] = alloca i64*, align 4
// CHECK7-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK7-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK7-NEXT: store i64* [[K]], i64** [[K_ADDR]], align 4
// CHECK7-NEXT: [[TMP0:%.*]] = load i64*, i64** [[K_ADDR]], align 4
// CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK7-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4
// CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i64*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP2]], i64* [[TMP0]])
// CHECK7-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i64* nonnull align 4 dereferenceable(8) [[K:%.*]]) #[[ATTR3]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[K_ADDR:%.*]] = alloca i64*, align 4
// CHECK7-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTLINEAR_START:%.*]] = alloca i64, align 8
// CHECK7-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[K1:%.*]] = alloca i64, align 8
// CHECK7-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK7-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK7-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK7-NEXT: store i64* [[K]], i64** [[K_ADDR]], align 4
// CHECK7-NEXT: [[TMP0:%.*]] = load i64*, i64** [[K_ADDR]], align 4
// CHECK7-NEXT: [[TMP1:%.*]] = load i64, i64* [[TMP0]], align 8
// CHECK7-NEXT: store i64 [[TMP1]], i64* [[DOTLINEAR_START]], align 8
// CHECK7-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK7-NEXT: store i32 8, i32* [[DOTOMP_UB]], align 4
// CHECK7-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK7-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK7-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK7-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK7-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP3]])
// CHECK7-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 1073741859, i32 0, i32 8, i32 1, i32 1)
// CHECK7-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK7: omp.dispatch.cond:
// CHECK7-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK7-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP4]], 0
// CHECK7-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK7: omp.dispatch.body:
// CHECK7-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK7-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK7: omp.inner.for.cond:
// CHECK7-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
// CHECK7-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !27
// CHECK7-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK7-NEXT: br i1 [[CMP]],
label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 10371 // CHECK7: omp.inner.for.body: 10372 // CHECK7-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27 10373 // CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 10374 // CHECK7-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[MUL]] 10375 // CHECK7-NEXT: store i32 [[SUB]], i32* [[I]], align 4, !llvm.access.group !27 10376 // CHECK7-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !27 10377 // CHECK7-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27 10378 // CHECK7-NEXT: [[MUL2:%.*]] = mul nsw i32 [[TMP10]], 3 10379 // CHECK7-NEXT: [[CONV:%.*]] = sext i32 [[MUL2]] to i64 10380 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP9]], [[CONV]] 10381 // CHECK7-NEXT: store i64 [[ADD]], i64* [[K1]], align 8, !llvm.access.group !27 10382 // CHECK7-NEXT: [[TMP11:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !27 10383 // CHECK7-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP11]], 1 10384 // CHECK7-NEXT: store i32 [[ADD3]], i32* [[A_ADDR]], align 4, !llvm.access.group !27 10385 // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 10386 // CHECK7: omp.body.continue: 10387 // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 10388 // CHECK7: omp.inner.for.inc: 10389 // CHECK7-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27 10390 // CHECK7-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1 10391 // CHECK7-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27 10392 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]] 10393 // CHECK7: omp.inner.for.end: 10394 // CHECK7-NEXT: br label [[OMP_DISPATCH_INC:%.*]] 10395 // CHECK7: omp.dispatch.inc: 10396 // CHECK7-NEXT: br label [[OMP_DISPATCH_COND]] 10397 // CHECK7: omp.dispatch.end: 10398 // CHECK7-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 10399 // CHECK7-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 10400 // CHECK7-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 10401 // CHECK7: .omp.final.then: 10402 // CHECK7-NEXT: store i32 1, i32* [[I]], align 4 10403 // CHECK7-NEXT: br label [[DOTOMP_FINAL_DONE]] 10404 // CHECK7: .omp.final.done: 10405 // CHECK7-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 10406 // CHECK7-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0 10407 // CHECK7-NEXT: br i1 [[TMP16]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]] 10408 // CHECK7: .omp.linear.pu: 10409 // CHECK7-NEXT: [[TMP17:%.*]] = load i64, i64* [[K1]], align 8 10410 // CHECK7-NEXT: store i64 [[TMP17]], i64* [[TMP0]], align 8 10411 // CHECK7-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] 10412 // CHECK7: .omp.linear.pu.done: 10413 // CHECK7-NEXT: ret void 10414 // 10415 // 10416 // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108 10417 // CHECK7-SAME: (i32 [[AA:%.*]], i32 [[LIN:%.*]], i32 [[A:%.*]]) #[[ATTR2]] { 10418 // CHECK7-NEXT: entry: 10419 // CHECK7-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 10420 // CHECK7-NEXT: [[LIN_ADDR:%.*]] = alloca i32, align 4 10421 // CHECK7-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 10422 // CHECK7-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 10423 // CHECK7-NEXT: [[LIN_CASTED:%.*]] = alloca i32, align 4 10424 // CHECK7-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 10425 // CHECK7-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 10426 // CHECK7-NEXT: store i32 [[LIN]], i32* 
[[LIN_ADDR]], align 4 10427 // CHECK7-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 10428 // CHECK7-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* 10429 // CHECK7-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 4 10430 // CHECK7-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* 10431 // CHECK7-NEXT: store i16 [[TMP0]], i16* [[CONV1]], align 2 10432 // CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[AA_CASTED]], align 4 10433 // CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[LIN_ADDR]], align 4 10434 // CHECK7-NEXT: store i32 [[TMP2]], i32* [[LIN_CASTED]], align 4 10435 // CHECK7-NEXT: [[TMP3:%.*]] = load i32, i32* [[LIN_CASTED]], align 4 10436 // CHECK7-NEXT: [[TMP4:%.*]] = load i32, i32* [[A_ADDR]], align 4 10437 // CHECK7-NEXT: store i32 [[TMP4]], i32* [[A_CASTED]], align 4 10438 // CHECK7-NEXT: [[TMP5:%.*]] = load i32, i32* [[A_CASTED]], align 4 10439 // CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]]) 10440 // CHECK7-NEXT: ret void 10441 // 10442 // 10443 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..3 10444 // CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[AA:%.*]], i32 [[LIN:%.*]], i32 [[A:%.*]]) #[[ATTR3]] { 10445 // CHECK7-NEXT: entry: 10446 // CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 10447 // CHECK7-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 10448 // CHECK7-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 10449 // CHECK7-NEXT: [[LIN_ADDR:%.*]] = alloca i32, align 4 10450 // CHECK7-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 10451 // CHECK7-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 10452 // CHECK7-NEXT: [[TMP:%.*]] = alloca i64, align 4 10453 // CHECK7-NEXT: [[DOTLINEAR_START:%.*]] = alloca i32, align 4 10454 // CHECK7-NEXT: [[DOTLINEAR_START1:%.*]] = alloca i32, align 4 10455 // CHECK7-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8 10456 // CHECK7-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 10457 // CHECK7-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 10458 // CHECK7-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 10459 // CHECK7-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 10460 // CHECK7-NEXT: [[IT:%.*]] = alloca i64, align 8 10461 // CHECK7-NEXT: [[LIN2:%.*]] = alloca i32, align 4 10462 // CHECK7-NEXT: [[A3:%.*]] = alloca i32, align 4 10463 // CHECK7-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 10464 // CHECK7-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 10465 // CHECK7-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 10466 // CHECK7-NEXT: store i32 [[LIN]], i32* [[LIN_ADDR]], align 4 10467 // CHECK7-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 10468 // CHECK7-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* 10469 // CHECK7-NEXT: [[TMP0:%.*]] = load i32, i32* [[LIN_ADDR]], align 4 10470 // CHECK7-NEXT: store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4 10471 // CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4 10472 // CHECK7-NEXT: store i32 [[TMP1]], i32* [[DOTLINEAR_START1]], align 4 10473 // CHECK7-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv() 10474 // CHECK7-NEXT: store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8 10475 // CHECK7-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 10476 // CHECK7-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8 10477 // 
CHECK7-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 10478 // CHECK7-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 10479 // CHECK7-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 10480 // CHECK7-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 10481 // CHECK7-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3]], i32 [[TMP3]]) 10482 // CHECK7-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) 10483 // CHECK7-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 10484 // CHECK7-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3 10485 // CHECK7-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 10486 // CHECK7: cond.true: 10487 // CHECK7-NEXT: br label [[COND_END:%.*]] 10488 // CHECK7: cond.false: 10489 // CHECK7-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 10490 // CHECK7-NEXT: br label [[COND_END]] 10491 // CHECK7: cond.end: 10492 // CHECK7-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 10493 // CHECK7-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 10494 // CHECK7-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 10495 // CHECK7-NEXT: store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8 10496 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 10497 // CHECK7: omp.inner.for.cond: 10498 // CHECK7-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30 10499 // CHECK7-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !30 10500 // CHECK7-NEXT: [[CMP4:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]] 10501 // CHECK7-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 10502 // CHECK7: omp.inner.for.body: 10503 // CHECK7-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30 10504 // CHECK7-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400 10505 // CHECK7-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] 10506 // CHECK7-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !30 10507 // CHECK7-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4, !llvm.access.group !30 10508 // CHECK7-NEXT: [[CONV5:%.*]] = sext i32 [[TMP10]] to i64 10509 // CHECK7-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30 10510 // CHECK7-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !30 10511 // CHECK7-NEXT: [[MUL6:%.*]] = mul i64 [[TMP11]], [[TMP12]] 10512 // CHECK7-NEXT: [[ADD:%.*]] = add i64 [[CONV5]], [[MUL6]] 10513 // CHECK7-NEXT: [[CONV7:%.*]] = trunc i64 [[ADD]] to i32 10514 // CHECK7-NEXT: store i32 [[CONV7]], i32* [[LIN2]], align 4, !llvm.access.group !30 10515 // CHECK7-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START1]], align 4, !llvm.access.group !30 10516 // CHECK7-NEXT: [[CONV8:%.*]] = sext i32 [[TMP13]] to i64 10517 // CHECK7-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30 10518 // CHECK7-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !30 10519 // CHECK7-NEXT: [[MUL9:%.*]] = mul i64 [[TMP14]], [[TMP15]] 10520 // CHECK7-NEXT: [[ADD10:%.*]] = add i64 [[CONV8]], [[MUL9]] 10521 // CHECK7-NEXT: [[CONV11:%.*]] = trunc i64 [[ADD10]] to i32 10522 // CHECK7-NEXT: store i32 [[CONV11]], i32* [[A3]], align 4, !llvm.access.group !30 10523 // CHECK7-NEXT: [[TMP16:%.*]] = load i16, i16* 
[[CONV]], align 4, !llvm.access.group !30 10524 // CHECK7-NEXT: [[CONV12:%.*]] = sext i16 [[TMP16]] to i32 10525 // CHECK7-NEXT: [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1 10526 // CHECK7-NEXT: [[CONV14:%.*]] = trunc i32 [[ADD13]] to i16 10527 // CHECK7-NEXT: store i16 [[CONV14]], i16* [[CONV]], align 4, !llvm.access.group !30 10528 // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 10529 // CHECK7: omp.body.continue: 10530 // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 10531 // CHECK7: omp.inner.for.inc: 10532 // CHECK7-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30 10533 // CHECK7-NEXT: [[ADD15:%.*]] = add i64 [[TMP17]], 1 10534 // CHECK7-NEXT: store i64 [[ADD15]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30 10535 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]] 10536 // CHECK7: omp.inner.for.end: 10537 // CHECK7-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 10538 // CHECK7: omp.loop.exit: 10539 // CHECK7-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 10540 // CHECK7-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 10541 // CHECK7-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0 10542 // CHECK7-NEXT: br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 10543 // CHECK7: .omp.final.then: 10544 // CHECK7-NEXT: store i64 400, i64* [[IT]], align 8 10545 // CHECK7-NEXT: br label [[DOTOMP_FINAL_DONE]] 10546 // CHECK7: .omp.final.done: 10547 // CHECK7-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 10548 // CHECK7-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0 10549 // CHECK7-NEXT: br i1 [[TMP21]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]] 10550 // CHECK7: .omp.linear.pu: 10551 // CHECK7-NEXT: [[TMP22:%.*]] = load i32, i32* [[LIN2]], align 4 10552 // CHECK7-NEXT: store i32 [[TMP22]], i32* [[LIN_ADDR]], align 4 10553 // CHECK7-NEXT: [[TMP23:%.*]] = load i32, i32* [[A3]], align 4 10554 // CHECK7-NEXT: store i32 [[TMP23]], i32* [[A_ADDR]], align 4 10555 // CHECK7-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] 10556 // CHECK7: .omp.linear.pu.done: 10557 // CHECK7-NEXT: ret void 10558 // 10559 // 10560 // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116 10561 // CHECK7-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]]) #[[ATTR2]] { 10562 // CHECK7-NEXT: entry: 10563 // CHECK7-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 10564 // CHECK7-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 10565 // CHECK7-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 10566 // CHECK7-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 10567 // CHECK7-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 10568 // CHECK7-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 10569 // CHECK7-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* 10570 // CHECK7-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4 10571 // CHECK7-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4 10572 // CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4 10573 // CHECK7-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV]], align 4 10574 // CHECK7-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* 10575 // CHECK7-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2 10576 // CHECK7-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4 10577 // CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]]) 10578 // CHECK7-NEXT: ret void 10579 // 10580 // 10581 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..4 10582 // CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]]) #[[ATTR3]] { 10583 // CHECK7-NEXT: entry: 10584 // CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 10585 // CHECK7-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 10586 // CHECK7-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 10587 // CHECK7-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 10588 // CHECK7-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 10589 // CHECK7-NEXT: [[TMP:%.*]] = alloca i16, align 2 10590 // CHECK7-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 10591 // CHECK7-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 10592 // CHECK7-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 10593 // CHECK7-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 10594 // CHECK7-NEXT: [[IT:%.*]] = alloca i16, align 2 10595 // CHECK7-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 10596 // CHECK7-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 10597 // CHECK7-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 10598 // CHECK7-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 10599 // CHECK7-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* 10600 // CHECK7-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 10601 // CHECK7-NEXT: store i32 3, i32* [[DOTOMP_UB]], align 4 10602 // CHECK7-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 10603 // CHECK7-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 10604 // CHECK7-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 10605 // CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 10606 // CHECK7-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 10607 // CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 10608 // CHECK7-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3 10609 // CHECK7-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 10610 // CHECK7: cond.true: 10611 // CHECK7-NEXT: br label [[COND_END:%.*]] 10612 // CHECK7: cond.false: 10613 // CHECK7-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 10614 // CHECK7-NEXT: br label [[COND_END]] 10615 // CHECK7: cond.end: 10616 // CHECK7-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 10617 // CHECK7-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 10618 // CHECK7-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 10619 // CHECK7-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 10620 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 10621 // CHECK7: omp.inner.for.cond: 10622 // CHECK7-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33 10623 // CHECK7-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !33 10624 // CHECK7-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 10625 // CHECK7-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 10626 // CHECK7: omp.inner.for.body: 10627 // CHECK7-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group 
!33 10628 // CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4 10629 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]] 10630 // CHECK7-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD]] to i16 10631 // CHECK7-NEXT: store i16 [[CONV2]], i16* [[IT]], align 2, !llvm.access.group !33 10632 // CHECK7-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !33 10633 // CHECK7-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1 10634 // CHECK7-NEXT: store i32 [[ADD3]], i32* [[A_ADDR]], align 4, !llvm.access.group !33 10635 // CHECK7-NEXT: [[TMP9:%.*]] = load i16, i16* [[CONV]], align 4, !llvm.access.group !33 10636 // CHECK7-NEXT: [[CONV4:%.*]] = sext i16 [[TMP9]] to i32 10637 // CHECK7-NEXT: [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1 10638 // CHECK7-NEXT: [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16 10639 // CHECK7-NEXT: store i16 [[CONV6]], i16* [[CONV]], align 4, !llvm.access.group !33 10640 // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 10641 // CHECK7: omp.body.continue: 10642 // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 10643 // CHECK7: omp.inner.for.inc: 10644 // CHECK7-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33 10645 // CHECK7-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP10]], 1 10646 // CHECK7-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33 10647 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]] 10648 // CHECK7: omp.inner.for.end: 10649 // CHECK7-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 10650 // CHECK7: omp.loop.exit: 10651 // CHECK7-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 10652 // CHECK7-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 10653 // CHECK7-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 10654 // CHECK7-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 10655 // CHECK7: .omp.final.then: 10656 // CHECK7-NEXT: store i16 22, i16* [[IT]], align 2 10657 // CHECK7-NEXT: br label [[DOTOMP_FINAL_DONE]] 10658 // CHECK7: .omp.final.done: 10659 // CHECK7-NEXT: ret void 10660 // 10661 // 10662 // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140 10663 // CHECK7-SAME: (i32 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], double* nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 4 dereferenceable(12) [[D:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { 10664 // CHECK7-NEXT: entry: 10665 // CHECK7-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 10666 // CHECK7-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4 10667 // CHECK7-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 10668 // CHECK7-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4 10669 // CHECK7-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4 10670 // CHECK7-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4 10671 // CHECK7-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4 10672 // CHECK7-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4 10673 // CHECK7-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4 10674 // CHECK7-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 10675 // CHECK7-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 10676 // CHECK7-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 10677 // CHECK7-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 10678 // CHECK7-NEXT: 
store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4 10679 // CHECK7-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 10680 // CHECK7-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4 10681 // CHECK7-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4 10682 // CHECK7-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4 10683 // CHECK7-NEXT: store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4 10684 // CHECK7-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4 10685 // CHECK7-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4 10686 // CHECK7-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 10687 // CHECK7-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4 10688 // CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 10689 // CHECK7-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4 10690 // CHECK7-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4 10691 // CHECK7-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4 10692 // CHECK7-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4 10693 // CHECK7-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4 10694 // CHECK7-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4 10695 // CHECK7-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4 10696 // CHECK7-NEXT: store i32 [[TMP8]], i32* [[A_CASTED]], align 4 10697 // CHECK7-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_CASTED]], align 4 10698 // CHECK7-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 10699 // CHECK7-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 10700 // CHECK7-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 10701 // CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x float]*, i32, float*, [5 x [10 x double]]*, i32, i32, double*, %struct.TT*, i32)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP9]], [10 x float]* [[TMP0]], i32 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i32 [[TMP11]]) 10702 // CHECK7-NEXT: ret void 10703 // 10704 // 10705 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..7 10706 // CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], double* nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 4 dereferenceable(12) [[D:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] { 10707 // CHECK7-NEXT: entry: 10708 // CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 10709 // CHECK7-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 10710 // CHECK7-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 10711 // CHECK7-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4 10712 // CHECK7-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 10713 // CHECK7-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4 10714 // CHECK7-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4 10715 // CHECK7-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4 10716 // CHECK7-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4 10717 // CHECK7-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4 10718 // CHECK7-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4 10719 // CHECK7-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 10720 // CHECK7-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 10721 // CHECK7-NEXT: [[TMP:%.*]] = alloca i8, align 1 10722 // CHECK7-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 10723 // CHECK7-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 10724 // CHECK7-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 10725 // CHECK7-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 10726 // CHECK7-NEXT: [[IT:%.*]] = alloca i8, align 1 10727 // CHECK7-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 10728 // CHECK7-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 10729 // CHECK7-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 10730 // CHECK7-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4 10731 // CHECK7-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 10732 // CHECK7-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4 10733 // CHECK7-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4 10734 // CHECK7-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4 10735 // CHECK7-NEXT: store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4 10736 // CHECK7-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4 10737 // CHECK7-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4 10738 // CHECK7-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 10739 // CHECK7-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4 10740 // CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 10741 // CHECK7-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4 10742 // CHECK7-NEXT: [[TMP3:%.*]] = load [5 x [10 x 
double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4 10743 // CHECK7-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4 10744 // CHECK7-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4 10745 // CHECK7-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4 10746 // CHECK7-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4 10747 // CHECK7-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 10748 // CHECK7-NEXT: store i32 25, i32* [[DOTOMP_UB]], align 4 10749 // CHECK7-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 10750 // CHECK7-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 10751 // CHECK7-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 10752 // CHECK7-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 10753 // CHECK7-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4 10754 // CHECK7-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]]) 10755 // CHECK7-NEXT: br label [[OMP_DISPATCH_COND:%.*]] 10756 // CHECK7: omp.dispatch.cond: 10757 // CHECK7-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 10758 // CHECK7-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25 10759 // CHECK7-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 10760 // CHECK7: cond.true: 10761 // CHECK7-NEXT: br label [[COND_END:%.*]] 10762 // CHECK7: cond.false: 10763 // CHECK7-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 10764 // CHECK7-NEXT: br label [[COND_END]] 10765 // CHECK7: cond.end: 10766 // CHECK7-NEXT: [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] 10767 // CHECK7-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 10768 // CHECK7-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 10769 // CHECK7-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4 10770 // CHECK7-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 10771 // CHECK7-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 10772 // CHECK7-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] 10773 // CHECK7-NEXT: br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] 10774 // CHECK7: omp.dispatch.body: 10775 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 10776 // CHECK7: omp.inner.for.cond: 10777 // CHECK7-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36 10778 // CHECK7-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !36 10779 // CHECK7-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]] 10780 // CHECK7-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 10781 // CHECK7: omp.inner.for.body: 10782 // CHECK7-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36 10783 // CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1 10784 // CHECK7-NEXT: [[SUB:%.*]] = sub nsw i32 122, [[MUL]] 10785 // CHECK7-NEXT: [[CONV:%.*]] = trunc i32 [[SUB]] to i8 10786 // CHECK7-NEXT: store i8 [[CONV]], i8* [[IT]], align 1, !llvm.access.group !36 10787 // CHECK7-NEXT: [[TMP19:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !36 10788 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1 10789 // CHECK7-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4, !llvm.access.group !36 10790 // CHECK7-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x 
float]* [[TMP0]], i32 0, i32 2 10791 // CHECK7-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !36 10792 // CHECK7-NEXT: [[CONV7:%.*]] = fpext float [[TMP20]] to double 10793 // CHECK7-NEXT: [[ADD8:%.*]] = fadd double [[CONV7]], 1.000000e+00 10794 // CHECK7-NEXT: [[CONV9:%.*]] = fptrunc double [[ADD8]] to float 10795 // CHECK7-NEXT: store float [[CONV9]], float* [[ARRAYIDX]], align 4, !llvm.access.group !36 10796 // CHECK7-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[TMP2]], i32 3 10797 // CHECK7-NEXT: [[TMP21:%.*]] = load float, float* [[ARRAYIDX10]], align 4, !llvm.access.group !36 10798 // CHECK7-NEXT: [[CONV11:%.*]] = fpext float [[TMP21]] to double 10799 // CHECK7-NEXT: [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00 10800 // CHECK7-NEXT: [[CONV13:%.*]] = fptrunc double [[ADD12]] to float 10801 // CHECK7-NEXT: store float [[CONV13]], float* [[ARRAYIDX10]], align 4, !llvm.access.group !36 10802 // CHECK7-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i32 0, i32 1 10803 // CHECK7-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX14]], i32 0, i32 2 10804 // CHECK7-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX15]], align 8, !llvm.access.group !36 10805 // CHECK7-NEXT: [[ADD16:%.*]] = fadd double [[TMP22]], 1.000000e+00 10806 // CHECK7-NEXT: store double [[ADD16]], double* [[ARRAYIDX15]], align 8, !llvm.access.group !36 10807 // CHECK7-NEXT: [[TMP23:%.*]] = mul nsw i32 1, [[TMP5]] 10808 // CHECK7-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds double, double* [[TMP6]], i32 [[TMP23]] 10809 // CHECK7-NEXT: [[ARRAYIDX18:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX17]], i32 3 10810 // CHECK7-NEXT: [[TMP24:%.*]] = load double, double* [[ARRAYIDX18]], align 8, !llvm.access.group !36 10811 // CHECK7-NEXT: [[ADD19:%.*]] = fadd double [[TMP24]], 1.000000e+00 10812 // CHECK7-NEXT: store double [[ADD19]], double* [[ARRAYIDX18]], align 8, !llvm.access.group !36 10813 // CHECK7-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0 10814 // CHECK7-NEXT: [[TMP25:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group !36 10815 // CHECK7-NEXT: [[ADD20:%.*]] = add nsw i64 [[TMP25]], 1 10816 // CHECK7-NEXT: store i64 [[ADD20]], i64* [[X]], align 4, !llvm.access.group !36 10817 // CHECK7-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1 10818 // CHECK7-NEXT: [[TMP26:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group !36 10819 // CHECK7-NEXT: [[CONV21:%.*]] = sext i8 [[TMP26]] to i32 10820 // CHECK7-NEXT: [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1 10821 // CHECK7-NEXT: [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8 10822 // CHECK7-NEXT: store i8 [[CONV23]], i8* [[Y]], align 4, !llvm.access.group !36 10823 // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 10824 // CHECK7: omp.body.continue: 10825 // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 10826 // CHECK7: omp.inner.for.inc: 10827 // CHECK7-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36 10828 // CHECK7-NEXT: [[ADD24:%.*]] = add nsw i32 [[TMP27]], 1 10829 // CHECK7-NEXT: store i32 [[ADD24]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36 10830 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP37:![0-9]+]] 10831 // CHECK7: omp.inner.for.end: 10832 // CHECK7-NEXT: br label [[OMP_DISPATCH_INC:%.*]] 10833 // CHECK7: omp.dispatch.inc: 10834 // 
CHECK7-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 10835 // CHECK7-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 10836 // CHECK7-NEXT: [[ADD25:%.*]] = add nsw i32 [[TMP28]], [[TMP29]] 10837 // CHECK7-NEXT: store i32 [[ADD25]], i32* [[DOTOMP_LB]], align 4 10838 // CHECK7-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 10839 // CHECK7-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 10840 // CHECK7-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP30]], [[TMP31]] 10841 // CHECK7-NEXT: store i32 [[ADD26]], i32* [[DOTOMP_UB]], align 4 10842 // CHECK7-NEXT: br label [[OMP_DISPATCH_COND]] 10843 // CHECK7: omp.dispatch.end: 10844 // CHECK7-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]]) 10845 // CHECK7-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 10846 // CHECK7-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0 10847 // CHECK7-NEXT: br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 10848 // CHECK7: .omp.final.then: 10849 // CHECK7-NEXT: store i8 96, i8* [[IT]], align 1 10850 // CHECK7-NEXT: br label [[DOTOMP_FINAL_DONE]] 10851 // CHECK7: .omp.final.done: 10852 // CHECK7-NEXT: ret void 10853 // 10854 // 10855 // CHECK7-LABEL: define {{[^@]+}}@_Z3bari 10856 // CHECK7-SAME: (i32 [[N:%.*]]) #[[ATTR0]] { 10857 // CHECK7-NEXT: entry: 10858 // CHECK7-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 10859 // CHECK7-NEXT: [[A:%.*]] = alloca i32, align 4 10860 // CHECK7-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4 10861 // CHECK7-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 10862 // CHECK7-NEXT: store i32 0, i32* [[A]], align 4 10863 // CHECK7-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 10864 // CHECK7-NEXT: [[CALL:%.*]] = call i32 @_Z3fooi(i32 [[TMP0]]) 10865 // CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4 10866 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] 10867 // CHECK7-NEXT: store i32 [[ADD]], i32* [[A]], align 4 10868 // CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 10869 // CHECK7-NEXT: [[CALL1:%.*]] = call i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 4 dereferenceable(8) [[S]], i32 [[TMP2]]) 10870 // CHECK7-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 10871 // CHECK7-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] 10872 // CHECK7-NEXT: store i32 [[ADD2]], i32* [[A]], align 4 10873 // CHECK7-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 10874 // CHECK7-NEXT: [[CALL3:%.*]] = call i32 @_ZL7fstatici(i32 [[TMP4]]) 10875 // CHECK7-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4 10876 // CHECK7-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] 10877 // CHECK7-NEXT: store i32 [[ADD4]], i32* [[A]], align 4 10878 // CHECK7-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4 10879 // CHECK7-NEXT: [[CALL5:%.*]] = call i32 @_Z9ftemplateIiET_i(i32 [[TMP6]]) 10880 // CHECK7-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4 10881 // CHECK7-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]] 10882 // CHECK7-NEXT: store i32 [[ADD6]], i32* [[A]], align 4 10883 // CHECK7-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4 10884 // CHECK7-NEXT: ret i32 [[TMP8]] 10885 // 10886 // 10887 // CHECK7-LABEL: define {{[^@]+}}@_ZN2S12r1Ei 10888 // CHECK7-SAME: (%struct.S1* nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 [[N:%.*]]) #[[ATTR0]] comdat align 2 { 10889 // CHECK7-NEXT: entry: 10890 // CHECK7-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 10891 // CHECK7-NEXT: [[N_ADDR:%.*]] = alloca 
i32, align 4 10892 // CHECK7-NEXT: [[B:%.*]] = alloca i32, align 4 10893 // CHECK7-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4 10894 // CHECK7-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4 10895 // CHECK7-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1 10896 // CHECK7-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4 10897 // CHECK7-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 10898 // CHECK7-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [6 x i8*], align 4 10899 // CHECK7-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [6 x i8*], align 4 10900 // CHECK7-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [6 x i8*], align 4 10901 // CHECK7-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [6 x i64], align 4 10902 // CHECK7-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 10903 // CHECK7-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 10904 // CHECK7-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 10905 // CHECK7-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 10906 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 10907 // CHECK7-NEXT: store i32 [[ADD]], i32* [[B]], align 4 10908 // CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 10909 // CHECK7-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave() 10910 // CHECK7-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4 10911 // CHECK7-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]] 10912 // CHECK7-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2 10913 // CHECK7-NEXT: store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4 10914 // CHECK7-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 10915 // CHECK7-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 60 10916 // CHECK7-NEXT: [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8 10917 // CHECK7-NEXT: store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_]], align 1 10918 // CHECK7-NEXT: [[TMP5:%.*]] = load i32, i32* [[B]], align 4 10919 // CHECK7-NEXT: store i32 [[TMP5]], i32* [[B_CASTED]], align 4 10920 // CHECK7-NEXT: [[TMP6:%.*]] = load i32, i32* [[B_CASTED]], align 4 10921 // CHECK7-NEXT: [[TMP7:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1 10922 // CHECK7-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP7]] to i1 10923 // CHECK7-NEXT: [[CONV:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__CASTED]] to i8* 10924 // CHECK7-NEXT: [[FROMBOOL2:%.*]] = zext i1 [[TOBOOL]] to i8 10925 // CHECK7-NEXT: store i8 [[FROMBOOL2]], i8* [[CONV]], align 1 10926 // CHECK7-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 10927 // CHECK7-NEXT: [[TMP9:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1 10928 // CHECK7-NEXT: [[TOBOOL3:%.*]] = trunc i8 [[TMP9]] to i1 10929 // CHECK7-NEXT: br i1 [[TOBOOL3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 10930 // CHECK7: omp_if.then: 10931 // CHECK7-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0 10932 // CHECK7-NEXT: [[TMP10:%.*]] = mul nuw i32 2, [[TMP1]] 10933 // CHECK7-NEXT: [[TMP11:%.*]] = mul nuw i32 [[TMP10]], 2 10934 // CHECK7-NEXT: [[TMP12:%.*]] = sext i32 [[TMP11]] to i64 10935 // CHECK7-NEXT: [[TMP13:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 10936 // CHECK7-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to %struct.S1** 10937 // CHECK7-NEXT: store %struct.S1* [[THIS1]], %struct.S1** [[TMP14]], align 4 10938 // CHECK7-NEXT: [[TMP15:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 10939 // CHECK7-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to double** 10940 // CHECK7-NEXT: 
store double* [[A]], double** [[TMP16]], align 4 10941 // CHECK7-NEXT: [[TMP17:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 10942 // CHECK7-NEXT: store i64 8, i64* [[TMP17]], align 4 10943 // CHECK7-NEXT: [[TMP18:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 10944 // CHECK7-NEXT: store i8* null, i8** [[TMP18]], align 4 10945 // CHECK7-NEXT: [[TMP19:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 10946 // CHECK7-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32* 10947 // CHECK7-NEXT: store i32 [[TMP6]], i32* [[TMP20]], align 4 10948 // CHECK7-NEXT: [[TMP21:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 10949 // CHECK7-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i32* 10950 // CHECK7-NEXT: store i32 [[TMP6]], i32* [[TMP22]], align 4 10951 // CHECK7-NEXT: [[TMP23:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 10952 // CHECK7-NEXT: store i64 4, i64* [[TMP23]], align 4 10953 // CHECK7-NEXT: [[TMP24:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 10954 // CHECK7-NEXT: store i8* null, i8** [[TMP24]], align 4 10955 // CHECK7-NEXT: [[TMP25:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 10956 // CHECK7-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* 10957 // CHECK7-NEXT: store i32 2, i32* [[TMP26]], align 4 10958 // CHECK7-NEXT: [[TMP27:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 10959 // CHECK7-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32* 10960 // CHECK7-NEXT: store i32 2, i32* [[TMP28]], align 4 10961 // CHECK7-NEXT: [[TMP29:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 10962 // CHECK7-NEXT: store i64 4, i64* [[TMP29]], align 4 10963 // CHECK7-NEXT: [[TMP30:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 10964 // CHECK7-NEXT: store i8* null, i8** [[TMP30]], align 4 10965 // CHECK7-NEXT: [[TMP31:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 10966 // CHECK7-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i32* 10967 // CHECK7-NEXT: store i32 [[TMP1]], i32* [[TMP32]], align 4 10968 // CHECK7-NEXT: [[TMP33:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 10969 // CHECK7-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32* 10970 // CHECK7-NEXT: store i32 [[TMP1]], i32* [[TMP34]], align 4 10971 // CHECK7-NEXT: [[TMP35:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 10972 // CHECK7-NEXT: store i64 4, i64* [[TMP35]], align 4 10973 // CHECK7-NEXT: [[TMP36:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 10974 // CHECK7-NEXT: store i8* null, i8** [[TMP36]], align 4 10975 // CHECK7-NEXT: [[TMP37:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 10976 // CHECK7-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i16** 10977 // CHECK7-NEXT: store i16* [[VLA]], i16** [[TMP38]], align 4 10978 // CHECK7-NEXT: [[TMP39:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 10979 // CHECK7-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i16** 10980 // CHECK7-NEXT: store i16* [[VLA]], i16** [[TMP40]], align 4 10981 // CHECK7-NEXT: [[TMP41:%.*]] = 
getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 10982 // CHECK7-NEXT: store i64 [[TMP12]], i64* [[TMP41]], align 4 10983 // CHECK7-NEXT: [[TMP42:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 10984 // CHECK7-NEXT: store i8* null, i8** [[TMP42]], align 4 10985 // CHECK7-NEXT: [[TMP43:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5 10986 // CHECK7-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32* 10987 // CHECK7-NEXT: store i32 [[TMP8]], i32* [[TMP44]], align 4 10988 // CHECK7-NEXT: [[TMP45:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5 10989 // CHECK7-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32* 10990 // CHECK7-NEXT: store i32 [[TMP8]], i32* [[TMP46]], align 4 10991 // CHECK7-NEXT: [[TMP47:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 10992 // CHECK7-NEXT: store i64 1, i64* [[TMP47]], align 4 10993 // CHECK7-NEXT: [[TMP48:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 5 10994 // CHECK7-NEXT: store i8* null, i8** [[TMP48]], align 4 10995 // CHECK7-NEXT: [[TMP49:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 10996 // CHECK7-NEXT: [[TMP50:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 10997 // CHECK7-NEXT: [[TMP51:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 10998 // CHECK7-NEXT: [[TMP52:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1 10999 // CHECK7-NEXT: [[TOBOOL4:%.*]] = trunc i8 [[TMP52]] to i1 11000 // CHECK7-NEXT: [[TMP53:%.*]] = select i1 [[TOBOOL4]], i32 0, i32 1 11001 // CHECK7-NEXT: [[TMP54:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214.region_id, i32 6, i8** [[TMP49]], i8** [[TMP50]], i64* [[TMP51]], i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 1, i32 [[TMP53]]) 11002 // CHECK7-NEXT: [[TMP55:%.*]] = icmp ne i32 [[TMP54]], 0 11003 // CHECK7-NEXT: br i1 [[TMP55]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 11004 // CHECK7: omp_offload.failed: 11005 // CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214(%struct.S1* [[THIS1]], i32 [[TMP6]], i32 2, i32 [[TMP1]], i16* [[VLA]], i32 [[TMP8]]) #[[ATTR4]] 11006 // CHECK7-NEXT: br label [[OMP_OFFLOAD_CONT]] 11007 // CHECK7: omp_offload.cont: 11008 // CHECK7-NEXT: br label [[OMP_IF_END:%.*]] 11009 // CHECK7: omp_if.else: 11010 // CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214(%struct.S1* [[THIS1]], i32 [[TMP6]], i32 2, i32 [[TMP1]], i16* [[VLA]], i32 [[TMP8]]) #[[ATTR4]] 11011 // CHECK7-NEXT: br label [[OMP_IF_END]] 11012 // CHECK7: omp_if.end: 11013 // CHECK7-NEXT: [[TMP56:%.*]] = mul nsw i32 1, [[TMP1]] 11014 // CHECK7-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP56]] 11015 // CHECK7-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1 11016 // CHECK7-NEXT: [[TMP57:%.*]] = load i16, i16* [[ARRAYIDX5]], align 2 11017 // CHECK7-NEXT: [[CONV6:%.*]] = sext i16 [[TMP57]] to i32 11018 // CHECK7-NEXT: [[TMP58:%.*]] = load i32, i32* [[B]], align 4 11019 // CHECK7-NEXT: [[ADD7:%.*]] = add nsw i32 [[CONV6]], [[TMP58]] 11020 // CHECK7-NEXT: [[TMP59:%.*]] = load i8*, i8** 
[[SAVED_STACK]], align 4 11021 // CHECK7-NEXT: call void @llvm.stackrestore(i8* [[TMP59]]) 11022 // CHECK7-NEXT: ret i32 [[ADD7]] 11023 // 11024 // 11025 // CHECK7-LABEL: define {{[^@]+}}@_ZL7fstatici 11026 // CHECK7-SAME: (i32 [[N:%.*]]) #[[ATTR0]] { 11027 // CHECK7-NEXT: entry: 11028 // CHECK7-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 11029 // CHECK7-NEXT: [[A:%.*]] = alloca i32, align 4 11030 // CHECK7-NEXT: [[AA:%.*]] = alloca i16, align 2 11031 // CHECK7-NEXT: [[AAA:%.*]] = alloca i8, align 1 11032 // CHECK7-NEXT: [[B:%.*]] = alloca [10 x i32], align 4 11033 // CHECK7-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 11034 // CHECK7-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 11035 // CHECK7-NEXT: [[AAA_CASTED:%.*]] = alloca i32, align 4 11036 // CHECK7-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 4 11037 // CHECK7-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 4 11038 // CHECK7-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 4 11039 // CHECK7-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 11040 // CHECK7-NEXT: store i32 0, i32* [[A]], align 4 11041 // CHECK7-NEXT: store i16 0, i16* [[AA]], align 2 11042 // CHECK7-NEXT: store i8 0, i8* [[AAA]], align 1 11043 // CHECK7-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4 11044 // CHECK7-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4 11045 // CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4 11046 // CHECK7-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2 11047 // CHECK7-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16* 11048 // CHECK7-NEXT: store i16 [[TMP2]], i16* [[CONV]], align 2 11049 // CHECK7-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4 11050 // CHECK7-NEXT: [[TMP4:%.*]] = load i8, i8* [[AAA]], align 1 11051 // CHECK7-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_CASTED]] to i8* 11052 // CHECK7-NEXT: store i8 [[TMP4]], i8* [[CONV1]], align 1 11053 // CHECK7-NEXT: [[TMP5:%.*]] = load i32, i32* [[AAA_CASTED]], align 4 11054 // CHECK7-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4 11055 // CHECK7-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 50 11056 // CHECK7-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 11057 // CHECK7: omp_if.then: 11058 // CHECK7-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 11059 // CHECK7-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32* 11060 // CHECK7-NEXT: store i32 [[TMP1]], i32* [[TMP8]], align 4 11061 // CHECK7-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 11062 // CHECK7-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i32* 11063 // CHECK7-NEXT: store i32 [[TMP1]], i32* [[TMP10]], align 4 11064 // CHECK7-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 11065 // CHECK7-NEXT: store i8* null, i8** [[TMP11]], align 4 11066 // CHECK7-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 11067 // CHECK7-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* 11068 // CHECK7-NEXT: store i32 [[TMP3]], i32* [[TMP13]], align 4 11069 // CHECK7-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 11070 // CHECK7-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* 11071 // CHECK7-NEXT: store i32 [[TMP3]], i32* [[TMP15]], align 4 11072 // CHECK7-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 11073 // 
CHECK7-NEXT: store i8* null, i8** [[TMP16]], align 4 11074 // CHECK7-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 11075 // CHECK7-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32* 11076 // CHECK7-NEXT: store i32 [[TMP5]], i32* [[TMP18]], align 4 11077 // CHECK7-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 11078 // CHECK7-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32* 11079 // CHECK7-NEXT: store i32 [[TMP5]], i32* [[TMP20]], align 4 11080 // CHECK7-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 11081 // CHECK7-NEXT: store i8* null, i8** [[TMP21]], align 4 11082 // CHECK7-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 11083 // CHECK7-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to [10 x i32]** 11084 // CHECK7-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP23]], align 4 11085 // CHECK7-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 11086 // CHECK7-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to [10 x i32]** 11087 // CHECK7-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP25]], align 4 11088 // CHECK7-NEXT: [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 11089 // CHECK7-NEXT: store i8* null, i8** [[TMP26]], align 4 11090 // CHECK7-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 11091 // CHECK7-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 11092 // CHECK7-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) 11093 // CHECK7-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0 11094 // CHECK7-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 11095 // CHECK7: omp_offload.failed: 11096 // CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]] 11097 // CHECK7-NEXT: br label [[OMP_OFFLOAD_CONT]] 11098 // CHECK7: omp_offload.cont: 11099 // CHECK7-NEXT: br label [[OMP_IF_END:%.*]] 11100 // CHECK7: omp_if.else: 11101 // CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]] 11102 // CHECK7-NEXT: br label [[OMP_IF_END]] 11103 // CHECK7: omp_if.end: 11104 // CHECK7-NEXT: [[TMP31:%.*]] = load i32, i32* [[A]], align 4 11105 // CHECK7-NEXT: ret i32 [[TMP31]] 11106 // 11107 // 11108 // CHECK7-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i 11109 // CHECK7-SAME: (i32 [[N:%.*]]) #[[ATTR0]] comdat { 11110 // CHECK7-NEXT: entry: 11111 // CHECK7-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 11112 // CHECK7-NEXT: [[A:%.*]] = alloca i32, align 4 11113 // CHECK7-NEXT: [[AA:%.*]] = alloca i16, align 2 11114 // CHECK7-NEXT: [[B:%.*]] = alloca [10 x i32], align 4 11115 // CHECK7-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 11116 // CHECK7-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 11117 
// CHECK7-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 11118 // CHECK7-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 11119 // CHECK7-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 11120 // CHECK7-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 11121 // CHECK7-NEXT: store i32 0, i32* [[A]], align 4 11122 // CHECK7-NEXT: store i16 0, i16* [[AA]], align 2 11123 // CHECK7-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4 11124 // CHECK7-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4 11125 // CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4 11126 // CHECK7-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2 11127 // CHECK7-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16* 11128 // CHECK7-NEXT: store i16 [[TMP2]], i16* [[CONV]], align 2 11129 // CHECK7-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4 11130 // CHECK7-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 11131 // CHECK7-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40 11132 // CHECK7-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 11133 // CHECK7: omp_if.then: 11134 // CHECK7-NEXT: [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 11135 // CHECK7-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32* 11136 // CHECK7-NEXT: store i32 [[TMP1]], i32* [[TMP6]], align 4 11137 // CHECK7-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 11138 // CHECK7-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32* 11139 // CHECK7-NEXT: store i32 [[TMP1]], i32* [[TMP8]], align 4 11140 // CHECK7-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 11141 // CHECK7-NEXT: store i8* null, i8** [[TMP9]], align 4 11142 // CHECK7-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 11143 // CHECK7-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i32* 11144 // CHECK7-NEXT: store i32 [[TMP3]], i32* [[TMP11]], align 4 11145 // CHECK7-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 11146 // CHECK7-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* 11147 // CHECK7-NEXT: store i32 [[TMP3]], i32* [[TMP13]], align 4 11148 // CHECK7-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 11149 // CHECK7-NEXT: store i8* null, i8** [[TMP14]], align 4 11150 // CHECK7-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 11151 // CHECK7-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to [10 x i32]** 11152 // CHECK7-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP16]], align 4 11153 // CHECK7-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 11154 // CHECK7-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [10 x i32]** 11155 // CHECK7-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP18]], align 4 11156 // CHECK7-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 11157 // CHECK7-NEXT: store i8* null, i8** [[TMP19]], align 4 11158 // CHECK7-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 11159 // CHECK7-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 11160 // CHECK7-NEXT: [[TMP22:%.*]] = call i32 
@__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.15, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) 11161 // CHECK7-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 11162 // CHECK7-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 11163 // CHECK7: omp_offload.failed: 11164 // CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178(i32 [[TMP1]], i32 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]] 11165 // CHECK7-NEXT: br label [[OMP_OFFLOAD_CONT]] 11166 // CHECK7: omp_offload.cont: 11167 // CHECK7-NEXT: br label [[OMP_IF_END:%.*]] 11168 // CHECK7: omp_if.else: 11169 // CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178(i32 [[TMP1]], i32 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]] 11170 // CHECK7-NEXT: br label [[OMP_IF_END]] 11171 // CHECK7: omp_if.end: 11172 // CHECK7-NEXT: [[TMP24:%.*]] = load i32, i32* [[A]], align 4 11173 // CHECK7-NEXT: ret i32 [[TMP24]] 11174 // 11175 // 11176 // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214 11177 // CHECK7-SAME: (%struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { 11178 // CHECK7-NEXT: entry: 11179 // CHECK7-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 11180 // CHECK7-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 11181 // CHECK7-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 11182 // CHECK7-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4 11183 // CHECK7-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4 11184 // CHECK7-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 11185 // CHECK7-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4 11186 // CHECK7-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 11187 // CHECK7-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4 11188 // CHECK7-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4 11189 // CHECK7-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]]) 11190 // CHECK7-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 11191 // CHECK7-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 11192 // CHECK7-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 11193 // CHECK7-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4 11194 // CHECK7-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4 11195 // CHECK7-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 11196 // CHECK7-NEXT: [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 11197 // CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 11198 // CHECK7-NEXT: [[TMP3:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4 11199 // CHECK7-NEXT: [[TMP4:%.*]] = load i16*, i16** [[C_ADDR]], align 4 11200 // CHECK7-NEXT: [[CONV:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__ADDR]] to i8* 11201 // CHECK7-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_ADDR]], align 4 11202 // CHECK7-NEXT: store i32 [[TMP5]], i32* [[B_CASTED]], align 4 11203 // CHECK7-NEXT: [[TMP6:%.*]] = load i32, i32* [[B_CASTED]], align 4 11204 // CHECK7-NEXT: [[TMP7:%.*]] = load i8, i8* [[CONV]], align 4 11205 // CHECK7-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP7]] to 
i1 11206 // CHECK7-NEXT: [[CONV3:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__CASTED]] to i8* 11207 // CHECK7-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 11208 // CHECK7-NEXT: store i8 [[FROMBOOL]], i8* [[CONV3]], align 1 11209 // CHECK7-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 11210 // CHECK7-NEXT: [[TMP9:%.*]] = load i8, i8* [[CONV]], align 4 11211 // CHECK7-NEXT: [[TOBOOL4:%.*]] = trunc i8 [[TMP9]] to i1 11212 // CHECK7-NEXT: br i1 [[TOBOOL4]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 11213 // CHECK7: omp_if.then: 11214 // CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*, i32)* @.omp_outlined..9 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]], i32 [[TMP6]], i32 [[TMP2]], i32 [[TMP3]], i16* [[TMP4]], i32 [[TMP8]]) 11215 // CHECK7-NEXT: br label [[OMP_IF_END:%.*]] 11216 // CHECK7: omp_if.else: 11217 // CHECK7-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]]) 11218 // CHECK7-NEXT: store i32 [[TMP0]], i32* [[DOTTHREADID_TEMP_]], align 4 11219 // CHECK7-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4 11220 // CHECK7-NEXT: call void @.omp_outlined..9(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTBOUND_ZERO_ADDR]], %struct.S1* [[TMP1]], i32 [[TMP6]], i32 [[TMP2]], i32 [[TMP3]], i16* [[TMP4]], i32 [[TMP8]]) #[[ATTR4]] 11221 // CHECK7-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]]) 11222 // CHECK7-NEXT: br label [[OMP_IF_END]] 11223 // CHECK7: omp_if.end: 11224 // CHECK7-NEXT: ret void 11225 // 11226 // 11227 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..9 11228 // CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] { 11229 // CHECK7-NEXT: entry: 11230 // CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 11231 // CHECK7-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 11232 // CHECK7-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 11233 // CHECK7-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 11234 // CHECK7-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 11235 // CHECK7-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4 11236 // CHECK7-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4 11237 // CHECK7-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 11238 // CHECK7-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 11239 // CHECK7-NEXT: [[TMP:%.*]] = alloca i64, align 4 11240 // CHECK7-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 11241 // CHECK7-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 11242 // CHECK7-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 11243 // CHECK7-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 11244 // CHECK7-NEXT: [[IT:%.*]] = alloca i64, align 8 11245 // CHECK7-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 11246 // CHECK7-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 11247 // CHECK7-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 11248 // CHECK7-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 11249 // CHECK7-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 11250 // CHECK7-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4 11251 // CHECK7-NEXT: store i16* [[C]], i16** [[C_ADDR]], 
align 4 11252 // CHECK7-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 11253 // CHECK7-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 11254 // CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 11255 // CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4 11256 // CHECK7-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4 11257 // CHECK7-NEXT: [[CONV:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__ADDR]] to i8* 11258 // CHECK7-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 11259 // CHECK7-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8 11260 // CHECK7-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 11261 // CHECK7-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 11262 // CHECK7-NEXT: [[TMP4:%.*]] = load i8, i8* [[CONV]], align 4 11263 // CHECK7-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP4]] to i1 11264 // CHECK7-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 11265 // CHECK7: omp_if.then: 11266 // CHECK7-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 11267 // CHECK7-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4 11268 // CHECK7-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) 11269 // CHECK7-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 11270 // CHECK7-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP7]], 3 11271 // CHECK7-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 11272 // CHECK7: cond.true: 11273 // CHECK7-NEXT: br label [[COND_END:%.*]] 11274 // CHECK7: cond.false: 11275 // CHECK7-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 11276 // CHECK7-NEXT: br label [[COND_END]] 11277 // CHECK7: cond.end: 11278 // CHECK7-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ] 11279 // CHECK7-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 11280 // CHECK7-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 11281 // CHECK7-NEXT: store i64 [[TMP9]], i64* [[DOTOMP_IV]], align 8 11282 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 11283 // CHECK7: omp.inner.for.cond: 11284 // CHECK7-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !39 11285 // CHECK7-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !39 11286 // CHECK7-NEXT: [[CMP3:%.*]] = icmp ule i64 [[TMP10]], [[TMP11]] 11287 // CHECK7-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 11288 // CHECK7: omp.inner.for.body: 11289 // CHECK7-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !39 11290 // CHECK7-NEXT: [[MUL:%.*]] = mul i64 [[TMP12]], 400 11291 // CHECK7-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] 11292 // CHECK7-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !39 11293 // CHECK7-NEXT: [[TMP13:%.*]] = load i32, i32* [[B_ADDR]], align 4, !llvm.access.group !39 11294 // CHECK7-NEXT: [[CONV4:%.*]] = sitofp i32 [[TMP13]] to double 11295 // CHECK7-NEXT: [[ADD:%.*]] = fadd double [[CONV4]], 1.500000e+00 11296 // CHECK7-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0 11297 // CHECK7-NEXT: store double [[ADD]], double* [[A]], align 4, !nontemporal !40, !llvm.access.group !39 11298 // CHECK7-NEXT: [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0 11299 // 
CHECK7-NEXT: [[TMP14:%.*]] = load double, double* [[A5]], align 4, !nontemporal !40, !llvm.access.group !39 11300 // CHECK7-NEXT: [[INC:%.*]] = fadd double [[TMP14]], 1.000000e+00 11301 // CHECK7-NEXT: store double [[INC]], double* [[A5]], align 4, !nontemporal !40, !llvm.access.group !39 11302 // CHECK7-NEXT: [[CONV6:%.*]] = fptosi double [[INC]] to i16 11303 // CHECK7-NEXT: [[TMP15:%.*]] = mul nsw i32 1, [[TMP2]] 11304 // CHECK7-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP15]] 11305 // CHECK7-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1 11306 // CHECK7-NEXT: store i16 [[CONV6]], i16* [[ARRAYIDX7]], align 2, !llvm.access.group !39 11307 // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 11308 // CHECK7: omp.body.continue: 11309 // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 11310 // CHECK7: omp.inner.for.inc: 11311 // CHECK7-NEXT: [[TMP16:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !39 11312 // CHECK7-NEXT: [[ADD8:%.*]] = add i64 [[TMP16]], 1 11313 // CHECK7-NEXT: store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !39 11314 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP41:![0-9]+]] 11315 // CHECK7: omp.inner.for.end: 11316 // CHECK7-NEXT: br label [[OMP_IF_END:%.*]] 11317 // CHECK7: omp_if.else: 11318 // CHECK7-NEXT: [[TMP17:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 11319 // CHECK7-NEXT: [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4 11320 // CHECK7-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP18]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) 11321 // CHECK7-NEXT: [[TMP19:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 11322 // CHECK7-NEXT: [[CMP9:%.*]] = icmp ugt i64 [[TMP19]], 3 11323 // CHECK7-NEXT: br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]] 11324 // CHECK7: cond.true10: 11325 // CHECK7-NEXT: br label [[COND_END12:%.*]] 11326 // CHECK7: cond.false11: 11327 // CHECK7-NEXT: [[TMP20:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 11328 // CHECK7-NEXT: br label [[COND_END12]] 11329 // CHECK7: cond.end12: 11330 // CHECK7-NEXT: [[COND13:%.*]] = phi i64 [ 3, [[COND_TRUE10]] ], [ [[TMP20]], [[COND_FALSE11]] ] 11331 // CHECK7-NEXT: store i64 [[COND13]], i64* [[DOTOMP_UB]], align 8 11332 // CHECK7-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 11333 // CHECK7-NEXT: store i64 [[TMP21]], i64* [[DOTOMP_IV]], align 8 11334 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND14:%.*]] 11335 // CHECK7: omp.inner.for.cond14: 11336 // CHECK7-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 11337 // CHECK7-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 11338 // CHECK7-NEXT: [[CMP15:%.*]] = icmp ule i64 [[TMP22]], [[TMP23]] 11339 // CHECK7-NEXT: br i1 [[CMP15]], label [[OMP_INNER_FOR_BODY16:%.*]], label [[OMP_INNER_FOR_END30:%.*]] 11340 // CHECK7: omp.inner.for.body16: 11341 // CHECK7-NEXT: [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 11342 // CHECK7-NEXT: [[MUL17:%.*]] = mul i64 [[TMP24]], 400 11343 // CHECK7-NEXT: [[SUB18:%.*]] = sub i64 2000, [[MUL17]] 11344 // CHECK7-NEXT: store i64 [[SUB18]], i64* [[IT]], align 8 11345 // CHECK7-NEXT: [[TMP25:%.*]] = load i32, i32* [[B_ADDR]], align 4 11346 // CHECK7-NEXT: [[CONV19:%.*]] = sitofp i32 [[TMP25]] to double 11347 // CHECK7-NEXT: [[ADD20:%.*]] = fadd double [[CONV19]], 1.500000e+00 11348 // CHECK7-NEXT: [[A21:%.*]] = getelementptr inbounds 
[[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0 11349 // CHECK7-NEXT: store double [[ADD20]], double* [[A21]], align 4 11350 // CHECK7-NEXT: [[A22:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0 11351 // CHECK7-NEXT: [[TMP26:%.*]] = load double, double* [[A22]], align 4 11352 // CHECK7-NEXT: [[INC23:%.*]] = fadd double [[TMP26]], 1.000000e+00 11353 // CHECK7-NEXT: store double [[INC23]], double* [[A22]], align 4 11354 // CHECK7-NEXT: [[CONV24:%.*]] = fptosi double [[INC23]] to i16 11355 // CHECK7-NEXT: [[TMP27:%.*]] = mul nsw i32 1, [[TMP2]] 11356 // CHECK7-NEXT: [[ARRAYIDX25:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP27]] 11357 // CHECK7-NEXT: [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX25]], i32 1 11358 // CHECK7-NEXT: store i16 [[CONV24]], i16* [[ARRAYIDX26]], align 2 11359 // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE27:%.*]] 11360 // CHECK7: omp.body.continue27: 11361 // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC28:%.*]] 11362 // CHECK7: omp.inner.for.inc28: 11363 // CHECK7-NEXT: [[TMP28:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 11364 // CHECK7-NEXT: [[ADD29:%.*]] = add i64 [[TMP28]], 1 11365 // CHECK7-NEXT: store i64 [[ADD29]], i64* [[DOTOMP_IV]], align 8 11366 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND14]], !llvm.loop [[LOOP43:![0-9]+]] 11367 // CHECK7: omp.inner.for.end30: 11368 // CHECK7-NEXT: br label [[OMP_IF_END]] 11369 // CHECK7: omp_if.end: 11370 // CHECK7-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 11371 // CHECK7: omp.loop.exit: 11372 // CHECK7-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 11373 // CHECK7-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4 11374 // CHECK7-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]]) 11375 // CHECK7-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 11376 // CHECK7-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 11377 // CHECK7-NEXT: br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 11378 // CHECK7: .omp.final.then: 11379 // CHECK7-NEXT: store i64 400, i64* [[IT]], align 8 11380 // CHECK7-NEXT: br label [[DOTOMP_FINAL_DONE]] 11381 // CHECK7: .omp.final.done: 11382 // CHECK7-NEXT: ret void 11383 // 11384 // 11385 // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195 11386 // CHECK7-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] { 11387 // CHECK7-NEXT: entry: 11388 // CHECK7-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 11389 // CHECK7-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 11390 // CHECK7-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4 11391 // CHECK7-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4 11392 // CHECK7-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 11393 // CHECK7-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 11394 // CHECK7-NEXT: [[AAA_CASTED:%.*]] = alloca i32, align 4 11395 // CHECK7-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 11396 // CHECK7-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 11397 // CHECK7-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4 11398 // CHECK7-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4 11399 // CHECK7-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* 11400 // CHECK7-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8* 11401 // CHECK7-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4 11402 // CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], 
align 4 11403 // CHECK7-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4 11404 // CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4 11405 // CHECK7-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 4 11406 // CHECK7-NEXT: [[CONV2:%.*]] = bitcast i32* [[AA_CASTED]] to i16* 11407 // CHECK7-NEXT: store i16 [[TMP3]], i16* [[CONV2]], align 2 11408 // CHECK7-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4 11409 // CHECK7-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV1]], align 4 11410 // CHECK7-NEXT: [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8* 11411 // CHECK7-NEXT: store i8 [[TMP5]], i8* [[CONV3]], align 1 11412 // CHECK7-NEXT: [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4 11413 // CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]]) 11414 // CHECK7-NEXT: ret void 11415 // 11416 // 11417 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..11 11418 // CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { 11419 // CHECK7-NEXT: entry: 11420 // CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 11421 // CHECK7-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 11422 // CHECK7-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 11423 // CHECK7-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 11424 // CHECK7-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4 11425 // CHECK7-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4 11426 // CHECK7-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 11427 // CHECK7-NEXT: [[TMP:%.*]] = alloca i32, align 4 11428 // CHECK7-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 11429 // CHECK7-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 11430 // CHECK7-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 11431 // CHECK7-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 11432 // CHECK7-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4 11433 // CHECK7-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4 11434 // CHECK7-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* 11435 // CHECK7-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8* 11436 // CHECK7-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4 11437 // CHECK7-NEXT: ret void 11438 // 11439 // 11440 // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178 11441 // CHECK7-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] { 11442 // CHECK7-NEXT: entry: 11443 // CHECK7-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 11444 // CHECK7-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 11445 // CHECK7-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4 11446 // CHECK7-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 11447 // CHECK7-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 11448 // CHECK7-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 11449 // CHECK7-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 11450 // CHECK7-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4 11451 // CHECK7-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* 11452 // CHECK7-NEXT: [[TMP0:%.*]] = load [10 x i32]*, 
[10 x i32]** [[B_ADDR]], align 4 11453 // CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4 11454 // CHECK7-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4 11455 // CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4 11456 // CHECK7-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 4 11457 // CHECK7-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* 11458 // CHECK7-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2 11459 // CHECK7-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4 11460 // CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) 11461 // CHECK7-NEXT: ret void 11462 // 11463 // 11464 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..14 11465 // CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { 11466 // CHECK7-NEXT: entry: 11467 // CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 11468 // CHECK7-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 11469 // CHECK7-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 11470 // CHECK7-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 11471 // CHECK7-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4 11472 // CHECK7-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 11473 // CHECK7-NEXT: [[TMP:%.*]] = alloca i64, align 4 11474 // CHECK7-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 11475 // CHECK7-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 11476 // CHECK7-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 11477 // CHECK7-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 11478 // CHECK7-NEXT: [[I:%.*]] = alloca i64, align 8 11479 // CHECK7-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 11480 // CHECK7-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 11481 // CHECK7-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 11482 // CHECK7-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 11483 // CHECK7-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4 11484 // CHECK7-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* 11485 // CHECK7-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4 11486 // CHECK7-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 11487 // CHECK7-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8 11488 // CHECK7-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 11489 // CHECK7-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 11490 // CHECK7-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 11491 // CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 11492 // CHECK7-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) 11493 // CHECK7-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 11494 // CHECK7-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6 11495 // CHECK7-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 11496 // CHECK7: cond.true: 11497 // CHECK7-NEXT: br label [[COND_END:%.*]] 11498 // CHECK7: cond.false: 11499 // CHECK7-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 11500 // CHECK7-NEXT: br label 
[[COND_END]] 11501 // CHECK7: cond.end: 11502 // CHECK7-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 11503 // CHECK7-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 11504 // CHECK7-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 11505 // CHECK7-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8 11506 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 11507 // CHECK7: omp.inner.for.cond: 11508 // CHECK7-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !45 11509 // CHECK7-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !45 11510 // CHECK7-NEXT: [[CMP1:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]] 11511 // CHECK7-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 11512 // CHECK7: omp.inner.for.body: 11513 // CHECK7-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !45 11514 // CHECK7-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3 11515 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]] 11516 // CHECK7-NEXT: store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !45 11517 // CHECK7-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !45 11518 // CHECK7-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1 11519 // CHECK7-NEXT: store i32 [[ADD2]], i32* [[A_ADDR]], align 4, !llvm.access.group !45 11520 // CHECK7-NEXT: [[TMP10:%.*]] = load i16, i16* [[CONV]], align 4, !llvm.access.group !45 11521 // CHECK7-NEXT: [[CONV3:%.*]] = sext i16 [[TMP10]] to i32 11522 // CHECK7-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1 11523 // CHECK7-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16 11524 // CHECK7-NEXT: store i16 [[CONV5]], i16* [[CONV]], align 4, !llvm.access.group !45 11525 // CHECK7-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2 11526 // CHECK7-NEXT: [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !45 11527 // CHECK7-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP11]], 1 11528 // CHECK7-NEXT: store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !45 11529 // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 11530 // CHECK7: omp.body.continue: 11531 // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 11532 // CHECK7: omp.inner.for.inc: 11533 // CHECK7-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !45 11534 // CHECK7-NEXT: [[ADD7:%.*]] = add nsw i64 [[TMP12]], 1 11535 // CHECK7-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !45 11536 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP46:![0-9]+]] 11537 // CHECK7: omp.inner.for.end: 11538 // CHECK7-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 11539 // CHECK7: omp.loop.exit: 11540 // CHECK7-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 11541 // CHECK7-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 11542 // CHECK7-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 11543 // CHECK7-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 11544 // CHECK7: .omp.final.then: 11545 // CHECK7-NEXT: store i64 11, i64* [[I]], align 8 11546 // CHECK7-NEXT: br label [[DOTOMP_FINAL_DONE]] 11547 // CHECK7: .omp.final.done: 11548 // CHECK7-NEXT: ret void 11549 // 11550 // 11551 // CHECK7-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg 11552 // CHECK7-SAME: () #[[ATTR7:[0-9]+]] { 11553 // CHECK7-NEXT: entry: 11554 // CHECK7-NEXT: call void 
@__tgt_register_requires(i64 1) 11555 // CHECK7-NEXT: ret void 11556 // 11557 // 11558 // CHECK8-LABEL: define {{[^@]+}}@_Z7get_valv 11559 // CHECK8-SAME: () #[[ATTR0:[0-9]+]] { 11560 // CHECK8-NEXT: entry: 11561 // CHECK8-NEXT: ret i64 0 11562 // 11563 // 11564 // CHECK8-LABEL: define {{[^@]+}}@_Z3fooi 11565 // CHECK8-SAME: (i32 [[N:%.*]]) #[[ATTR0]] { 11566 // CHECK8-NEXT: entry: 11567 // CHECK8-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 11568 // CHECK8-NEXT: [[A:%.*]] = alloca i32, align 4 11569 // CHECK8-NEXT: [[AA:%.*]] = alloca i16, align 2 11570 // CHECK8-NEXT: [[B:%.*]] = alloca [10 x float], align 4 11571 // CHECK8-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4 11572 // CHECK8-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4 11573 // CHECK8-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8 11574 // CHECK8-NEXT: [[__VLA_EXPR1:%.*]] = alloca i32, align 4 11575 // CHECK8-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4 11576 // CHECK8-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1 11577 // CHECK8-NEXT: [[K:%.*]] = alloca i64, align 8 11578 // CHECK8-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 11579 // CHECK8-NEXT: [[LIN:%.*]] = alloca i32, align 4 11580 // CHECK8-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 11581 // CHECK8-NEXT: [[LIN_CASTED:%.*]] = alloca i32, align 4 11582 // CHECK8-NEXT: [[A_CASTED2:%.*]] = alloca i32, align 4 11583 // CHECK8-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 11584 // CHECK8-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 11585 // CHECK8-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 11586 // CHECK8-NEXT: [[A_CASTED3:%.*]] = alloca i32, align 4 11587 // CHECK8-NEXT: [[AA_CASTED4:%.*]] = alloca i32, align 4 11588 // CHECK8-NEXT: [[DOTOFFLOAD_BASEPTRS6:%.*]] = alloca [2 x i8*], align 4 11589 // CHECK8-NEXT: [[DOTOFFLOAD_PTRS7:%.*]] = alloca [2 x i8*], align 4 11590 // CHECK8-NEXT: [[DOTOFFLOAD_MAPPERS8:%.*]] = alloca [2 x i8*], align 4 11591 // CHECK8-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 11592 // CHECK8-NEXT: [[A_CASTED11:%.*]] = alloca i32, align 4 11593 // CHECK8-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 11594 // CHECK8-NEXT: [[DOTOFFLOAD_BASEPTRS14:%.*]] = alloca [10 x i8*], align 4 11595 // CHECK8-NEXT: [[DOTOFFLOAD_PTRS15:%.*]] = alloca [10 x i8*], align 4 11596 // CHECK8-NEXT: [[DOTOFFLOAD_MAPPERS16:%.*]] = alloca [10 x i8*], align 4 11597 // CHECK8-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 4 11598 // CHECK8-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) 11599 // CHECK8-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 11600 // CHECK8-NEXT: store i32 0, i32* [[A]], align 4 11601 // CHECK8-NEXT: store i16 0, i16* [[AA]], align 2 11602 // CHECK8-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 11603 // CHECK8-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave() 11604 // CHECK8-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4 11605 // CHECK8-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP1]], align 4 11606 // CHECK8-NEXT: store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4 11607 // CHECK8-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4 11608 // CHECK8-NEXT: [[TMP4:%.*]] = mul nuw i32 5, [[TMP3]] 11609 // CHECK8-NEXT: [[VLA1:%.*]] = alloca double, i32 [[TMP4]], align 8 11610 // CHECK8-NEXT: store i32 [[TMP3]], i32* [[__VLA_EXPR1]], align 4 11611 // CHECK8-NEXT: [[TMP5:%.*]] = call i8* @__kmpc_omp_target_task_alloc(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i32 20, i32 1, i32 
(i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*), i64 -1) 11612 // CHECK8-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP5]] to %struct.kmp_task_t_with_privates* 11613 // CHECK8-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP6]], i32 0, i32 0 11614 // CHECK8-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i8* [[TMP5]]) 11615 // CHECK8-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv() 11616 // CHECK8-NEXT: store i64 [[CALL]], i64* [[K]], align 8 11617 // CHECK8-NEXT: [[TMP9:%.*]] = load i32, i32* [[A]], align 4 11618 // CHECK8-NEXT: store i32 [[TMP9]], i32* [[A_CASTED]], align 4 11619 // CHECK8-NEXT: [[TMP10:%.*]] = load i32, i32* [[A_CASTED]], align 4 11620 // CHECK8-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101(i32 [[TMP10]], i64* [[K]]) #[[ATTR4:[0-9]+]] 11621 // CHECK8-NEXT: store i32 12, i32* [[LIN]], align 4 11622 // CHECK8-NEXT: [[TMP11:%.*]] = load i16, i16* [[AA]], align 2 11623 // CHECK8-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16* 11624 // CHECK8-NEXT: store i16 [[TMP11]], i16* [[CONV]], align 2 11625 // CHECK8-NEXT: [[TMP12:%.*]] = load i32, i32* [[AA_CASTED]], align 4 11626 // CHECK8-NEXT: [[TMP13:%.*]] = load i32, i32* [[LIN]], align 4 11627 // CHECK8-NEXT: store i32 [[TMP13]], i32* [[LIN_CASTED]], align 4 11628 // CHECK8-NEXT: [[TMP14:%.*]] = load i32, i32* [[LIN_CASTED]], align 4 11629 // CHECK8-NEXT: [[TMP15:%.*]] = load i32, i32* [[A]], align 4 11630 // CHECK8-NEXT: store i32 [[TMP15]], i32* [[A_CASTED2]], align 4 11631 // CHECK8-NEXT: [[TMP16:%.*]] = load i32, i32* [[A_CASTED2]], align 4 11632 // CHECK8-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 11633 // CHECK8-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32* 11634 // CHECK8-NEXT: store i32 [[TMP12]], i32* [[TMP18]], align 4 11635 // CHECK8-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 11636 // CHECK8-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32* 11637 // CHECK8-NEXT: store i32 [[TMP12]], i32* [[TMP20]], align 4 11638 // CHECK8-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 11639 // CHECK8-NEXT: store i8* null, i8** [[TMP21]], align 4 11640 // CHECK8-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 11641 // CHECK8-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32* 11642 // CHECK8-NEXT: store i32 [[TMP14]], i32* [[TMP23]], align 4 11643 // CHECK8-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 11644 // CHECK8-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32* 11645 // CHECK8-NEXT: store i32 [[TMP14]], i32* [[TMP25]], align 4 11646 // CHECK8-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 11647 // CHECK8-NEXT: store i8* null, i8** [[TMP26]], align 4 11648 // CHECK8-NEXT: [[TMP27:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 11649 // CHECK8-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32* 11650 // CHECK8-NEXT: store i32 [[TMP16]], i32* [[TMP28]], align 4 11651 // CHECK8-NEXT: [[TMP29:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 11652 // CHECK8-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i32* 
11653 // CHECK8-NEXT: store i32 [[TMP16]], i32* [[TMP30]], align 4 11654 // CHECK8-NEXT: [[TMP31:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 11655 // CHECK8-NEXT: store i8* null, i8** [[TMP31]], align 4 11656 // CHECK8-NEXT: [[TMP32:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 11657 // CHECK8-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 11658 // CHECK8-NEXT: [[TMP34:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108.region_id, i32 3, i8** [[TMP32]], i8** [[TMP33]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) 11659 // CHECK8-NEXT: [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0 11660 // CHECK8-NEXT: br i1 [[TMP35]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 11661 // CHECK8: omp_offload.failed: 11662 // CHECK8-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108(i32 [[TMP12]], i32 [[TMP14]], i32 [[TMP16]]) #[[ATTR4]] 11663 // CHECK8-NEXT: br label [[OMP_OFFLOAD_CONT]] 11664 // CHECK8: omp_offload.cont: 11665 // CHECK8-NEXT: [[TMP36:%.*]] = load i32, i32* [[A]], align 4 11666 // CHECK8-NEXT: store i32 [[TMP36]], i32* [[A_CASTED3]], align 4 11667 // CHECK8-NEXT: [[TMP37:%.*]] = load i32, i32* [[A_CASTED3]], align 4 11668 // CHECK8-NEXT: [[TMP38:%.*]] = load i16, i16* [[AA]], align 2 11669 // CHECK8-NEXT: [[CONV5:%.*]] = bitcast i32* [[AA_CASTED4]] to i16* 11670 // CHECK8-NEXT: store i16 [[TMP38]], i16* [[CONV5]], align 2 11671 // CHECK8-NEXT: [[TMP39:%.*]] = load i32, i32* [[AA_CASTED4]], align 4 11672 // CHECK8-NEXT: [[TMP40:%.*]] = load i32, i32* [[N_ADDR]], align 4 11673 // CHECK8-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP40]], 10 11674 // CHECK8-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 11675 // CHECK8: omp_if.then: 11676 // CHECK8-NEXT: [[TMP41:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 0 11677 // CHECK8-NEXT: [[TMP42:%.*]] = bitcast i8** [[TMP41]] to i32* 11678 // CHECK8-NEXT: store i32 [[TMP37]], i32* [[TMP42]], align 4 11679 // CHECK8-NEXT: [[TMP43:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS7]], i32 0, i32 0 11680 // CHECK8-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32* 11681 // CHECK8-NEXT: store i32 [[TMP37]], i32* [[TMP44]], align 4 11682 // CHECK8-NEXT: [[TMP45:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS8]], i32 0, i32 0 11683 // CHECK8-NEXT: store i8* null, i8** [[TMP45]], align 4 11684 // CHECK8-NEXT: [[TMP46:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 1 11685 // CHECK8-NEXT: [[TMP47:%.*]] = bitcast i8** [[TMP46]] to i32* 11686 // CHECK8-NEXT: store i32 [[TMP39]], i32* [[TMP47]], align 4 11687 // CHECK8-NEXT: [[TMP48:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS7]], i32 0, i32 1 11688 // CHECK8-NEXT: [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32* 11689 // CHECK8-NEXT: store i32 [[TMP39]], i32* [[TMP49]], align 4 11690 // CHECK8-NEXT: [[TMP50:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS8]], i32 0, i32 1 11691 // CHECK8-NEXT: store i8* null, i8** [[TMP50]], align 4 11692 // CHECK8-NEXT: [[TMP51:%.*]] = getelementptr 
inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 0 11693 // CHECK8-NEXT: [[TMP52:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS7]], i32 0, i32 0 11694 // CHECK8-NEXT: [[TMP53:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116.region_id, i32 2, i8** [[TMP51]], i8** [[TMP52]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) 11695 // CHECK8-NEXT: [[TMP54:%.*]] = icmp ne i32 [[TMP53]], 0 11696 // CHECK8-NEXT: br i1 [[TMP54]], label [[OMP_OFFLOAD_FAILED9:%.*]], label [[OMP_OFFLOAD_CONT10:%.*]] 11697 // CHECK8: omp_offload.failed9: 11698 // CHECK8-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116(i32 [[TMP37]], i32 [[TMP39]]) #[[ATTR4]] 11699 // CHECK8-NEXT: br label [[OMP_OFFLOAD_CONT10]] 11700 // CHECK8: omp_offload.cont10: 11701 // CHECK8-NEXT: br label [[OMP_IF_END:%.*]] 11702 // CHECK8: omp_if.else: 11703 // CHECK8-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116(i32 [[TMP37]], i32 [[TMP39]]) #[[ATTR4]] 11704 // CHECK8-NEXT: br label [[OMP_IF_END]] 11705 // CHECK8: omp_if.end: 11706 // CHECK8-NEXT: [[TMP55:%.*]] = load i32, i32* [[A]], align 4 11707 // CHECK8-NEXT: store i32 [[TMP55]], i32* [[DOTCAPTURE_EXPR_]], align 4 11708 // CHECK8-NEXT: [[TMP56:%.*]] = load i32, i32* [[A]], align 4 11709 // CHECK8-NEXT: store i32 [[TMP56]], i32* [[A_CASTED11]], align 4 11710 // CHECK8-NEXT: [[TMP57:%.*]] = load i32, i32* [[A_CASTED11]], align 4 11711 // CHECK8-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 11712 // CHECK8-NEXT: store i32 [[TMP58]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 11713 // CHECK8-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 11714 // CHECK8-NEXT: [[TMP60:%.*]] = load i32, i32* [[N_ADDR]], align 4 11715 // CHECK8-NEXT: [[CMP12:%.*]] = icmp sgt i32 [[TMP60]], 20 11716 // CHECK8-NEXT: br i1 [[CMP12]], label [[OMP_IF_THEN13:%.*]], label [[OMP_IF_ELSE19:%.*]] 11717 // CHECK8: omp_if.then13: 11718 // CHECK8-NEXT: [[TMP61:%.*]] = mul nuw i32 [[TMP1]], 4 11719 // CHECK8-NEXT: [[TMP62:%.*]] = sext i32 [[TMP61]] to i64 11720 // CHECK8-NEXT: [[TMP63:%.*]] = mul nuw i32 5, [[TMP3]] 11721 // CHECK8-NEXT: [[TMP64:%.*]] = mul nuw i32 [[TMP63]], 8 11722 // CHECK8-NEXT: [[TMP65:%.*]] = sext i32 [[TMP64]] to i64 11723 // CHECK8-NEXT: [[TMP66:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 0 11724 // CHECK8-NEXT: [[TMP67:%.*]] = bitcast i8** [[TMP66]] to i32* 11725 // CHECK8-NEXT: store i32 [[TMP57]], i32* [[TMP67]], align 4 11726 // CHECK8-NEXT: [[TMP68:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0 11727 // CHECK8-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i32* 11728 // CHECK8-NEXT: store i32 [[TMP57]], i32* [[TMP69]], align 4 11729 // CHECK8-NEXT: [[TMP70:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 11730 // CHECK8-NEXT: store i64 4, i64* [[TMP70]], align 4 11731 // CHECK8-NEXT: [[TMP71:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 0 11732 // CHECK8-NEXT: store i8* null, i8** [[TMP71]], align 4 11733 // CHECK8-NEXT: [[TMP72:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 1 11734 // 
CHECK8-NEXT: [[TMP73:%.*]] = bitcast i8** [[TMP72]] to [10 x float]** 11735 // CHECK8-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP73]], align 4 11736 // CHECK8-NEXT: [[TMP74:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 1 11737 // CHECK8-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to [10 x float]** 11738 // CHECK8-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP75]], align 4 11739 // CHECK8-NEXT: [[TMP76:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 11740 // CHECK8-NEXT: store i64 40, i64* [[TMP76]], align 4 11741 // CHECK8-NEXT: [[TMP77:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 1 11742 // CHECK8-NEXT: store i8* null, i8** [[TMP77]], align 4 11743 // CHECK8-NEXT: [[TMP78:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 2 11744 // CHECK8-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32* 11745 // CHECK8-NEXT: store i32 [[TMP1]], i32* [[TMP79]], align 4 11746 // CHECK8-NEXT: [[TMP80:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 2 11747 // CHECK8-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i32* 11748 // CHECK8-NEXT: store i32 [[TMP1]], i32* [[TMP81]], align 4 11749 // CHECK8-NEXT: [[TMP82:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 11750 // CHECK8-NEXT: store i64 4, i64* [[TMP82]], align 4 11751 // CHECK8-NEXT: [[TMP83:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 2 11752 // CHECK8-NEXT: store i8* null, i8** [[TMP83]], align 4 11753 // CHECK8-NEXT: [[TMP84:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 3 11754 // CHECK8-NEXT: [[TMP85:%.*]] = bitcast i8** [[TMP84]] to float** 11755 // CHECK8-NEXT: store float* [[VLA]], float** [[TMP85]], align 4 11756 // CHECK8-NEXT: [[TMP86:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 3 11757 // CHECK8-NEXT: [[TMP87:%.*]] = bitcast i8** [[TMP86]] to float** 11758 // CHECK8-NEXT: store float* [[VLA]], float** [[TMP87]], align 4 11759 // CHECK8-NEXT: [[TMP88:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 11760 // CHECK8-NEXT: store i64 [[TMP62]], i64* [[TMP88]], align 4 11761 // CHECK8-NEXT: [[TMP89:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 3 11762 // CHECK8-NEXT: store i8* null, i8** [[TMP89]], align 4 11763 // CHECK8-NEXT: [[TMP90:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 4 11764 // CHECK8-NEXT: [[TMP91:%.*]] = bitcast i8** [[TMP90]] to [5 x [10 x double]]** 11765 // CHECK8-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP91]], align 4 11766 // CHECK8-NEXT: [[TMP92:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 4 11767 // CHECK8-NEXT: [[TMP93:%.*]] = bitcast i8** [[TMP92]] to [5 x [10 x double]]** 11768 // CHECK8-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP93]], align 4 11769 // CHECK8-NEXT: [[TMP94:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 11770 // CHECK8-NEXT: store i64 400, i64* [[TMP94]], align 4 11771 // CHECK8-NEXT: [[TMP95:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 4 11772 // CHECK8-NEXT: store i8* null, i8** 
[[TMP95]], align 4 11773 // CHECK8-NEXT: [[TMP96:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 5 11774 // CHECK8-NEXT: [[TMP97:%.*]] = bitcast i8** [[TMP96]] to i32* 11775 // CHECK8-NEXT: store i32 5, i32* [[TMP97]], align 4 11776 // CHECK8-NEXT: [[TMP98:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 5 11777 // CHECK8-NEXT: [[TMP99:%.*]] = bitcast i8** [[TMP98]] to i32* 11778 // CHECK8-NEXT: store i32 5, i32* [[TMP99]], align 4 11779 // CHECK8-NEXT: [[TMP100:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 11780 // CHECK8-NEXT: store i64 4, i64* [[TMP100]], align 4 11781 // CHECK8-NEXT: [[TMP101:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 5 11782 // CHECK8-NEXT: store i8* null, i8** [[TMP101]], align 4 11783 // CHECK8-NEXT: [[TMP102:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 6 11784 // CHECK8-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to i32* 11785 // CHECK8-NEXT: store i32 [[TMP3]], i32* [[TMP103]], align 4 11786 // CHECK8-NEXT: [[TMP104:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 6 11787 // CHECK8-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to i32* 11788 // CHECK8-NEXT: store i32 [[TMP3]], i32* [[TMP105]], align 4 11789 // CHECK8-NEXT: [[TMP106:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 11790 // CHECK8-NEXT: store i64 4, i64* [[TMP106]], align 4 11791 // CHECK8-NEXT: [[TMP107:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 6 11792 // CHECK8-NEXT: store i8* null, i8** [[TMP107]], align 4 11793 // CHECK8-NEXT: [[TMP108:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 7 11794 // CHECK8-NEXT: [[TMP109:%.*]] = bitcast i8** [[TMP108]] to double** 11795 // CHECK8-NEXT: store double* [[VLA1]], double** [[TMP109]], align 4 11796 // CHECK8-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 7 11797 // CHECK8-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to double** 11798 // CHECK8-NEXT: store double* [[VLA1]], double** [[TMP111]], align 4 11799 // CHECK8-NEXT: [[TMP112:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 11800 // CHECK8-NEXT: store i64 [[TMP65]], i64* [[TMP112]], align 4 11801 // CHECK8-NEXT: [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 7 11802 // CHECK8-NEXT: store i8* null, i8** [[TMP113]], align 4 11803 // CHECK8-NEXT: [[TMP114:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 8 11804 // CHECK8-NEXT: [[TMP115:%.*]] = bitcast i8** [[TMP114]] to %struct.TT** 11805 // CHECK8-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP115]], align 4 11806 // CHECK8-NEXT: [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 8 11807 // CHECK8-NEXT: [[TMP117:%.*]] = bitcast i8** [[TMP116]] to %struct.TT** 11808 // CHECK8-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP117]], align 4 11809 // CHECK8-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 11810 // CHECK8-NEXT: store i64 12, i64* [[TMP118]], align 4 11811 // CHECK8-NEXT: [[TMP119:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* 
[[DOTOFFLOAD_MAPPERS16]], i32 0, i32 8 11812 // CHECK8-NEXT: store i8* null, i8** [[TMP119]], align 4 11813 // CHECK8-NEXT: [[TMP120:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 9 11814 // CHECK8-NEXT: [[TMP121:%.*]] = bitcast i8** [[TMP120]] to i32* 11815 // CHECK8-NEXT: store i32 [[TMP59]], i32* [[TMP121]], align 4 11816 // CHECK8-NEXT: [[TMP122:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 9 11817 // CHECK8-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to i32* 11818 // CHECK8-NEXT: store i32 [[TMP59]], i32* [[TMP123]], align 4 11819 // CHECK8-NEXT: [[TMP124:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 9 11820 // CHECK8-NEXT: store i64 4, i64* [[TMP124]], align 4 11821 // CHECK8-NEXT: [[TMP125:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 9 11822 // CHECK8-NEXT: store i8* null, i8** [[TMP125]], align 4 11823 // CHECK8-NEXT: [[TMP126:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 0 11824 // CHECK8-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0 11825 // CHECK8-NEXT: [[TMP128:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 11826 // CHECK8-NEXT: [[TMP129:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140.region_id, i32 10, i8** [[TMP126]], i8** [[TMP127]], i64* [[TMP128]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) 11827 // CHECK8-NEXT: [[TMP130:%.*]] = icmp ne i32 [[TMP129]], 0 11828 // CHECK8-NEXT: br i1 [[TMP130]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]] 11829 // CHECK8: omp_offload.failed17: 11830 // CHECK8-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i32 [[TMP57]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP59]]) #[[ATTR4]] 11831 // CHECK8-NEXT: br label [[OMP_OFFLOAD_CONT18]] 11832 // CHECK8: omp_offload.cont18: 11833 // CHECK8-NEXT: br label [[OMP_IF_END20:%.*]] 11834 // CHECK8: omp_if.else19: 11835 // CHECK8-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i32 [[TMP57]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP59]]) #[[ATTR4]] 11836 // CHECK8-NEXT: br label [[OMP_IF_END20]] 11837 // CHECK8: omp_if.end20: 11838 // CHECK8-NEXT: [[TMP131:%.*]] = load i32, i32* [[A]], align 4 11839 // CHECK8-NEXT: [[TMP132:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 11840 // CHECK8-NEXT: call void @llvm.stackrestore(i8* [[TMP132]]) 11841 // CHECK8-NEXT: ret i32 [[TMP131]] 11842 // 11843 // 11844 // CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96 11845 // CHECK8-SAME: () #[[ATTR2:[0-9]+]] { 11846 // CHECK8-NEXT: entry: 11847 // CHECK8-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*)) 11848 // CHECK8-NEXT: ret void 11849 // 11850 // 11851 // CHECK8-LABEL: define {{[^@]+}}@.omp_outlined. 
// CHECK8-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK8-NEXT: entry:
// CHECK8-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK8-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK8-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK8-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK8-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK8-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK8-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4
// CHECK8-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK8-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK8-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK8-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK8-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK8-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK8-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5
// CHECK8-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK8: cond.true:
// CHECK8-NEXT: br label [[COND_END:%.*]]
// CHECK8: cond.false:
// CHECK8-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK8-NEXT: br label [[COND_END]]
// CHECK8: cond.end:
// CHECK8-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK8-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK8-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK8-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK8-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK8: omp.inner.for.cond:
// CHECK8-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK8-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !11
// CHECK8-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK8-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK8: omp.inner.for.body:
// CHECK8-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK8-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
// CHECK8-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]]
// CHECK8-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !11
// CHECK8-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK8: omp.body.continue:
// CHECK8-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK8: omp.inner.for.inc:
// CHECK8-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK8-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK8-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK8-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]]
// CHECK8: omp.inner.for.end:
// CHECK8-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK8: omp.loop.exit:
// CHECK8-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK8-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK8-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
// CHECK8-NEXT: br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK8: .omp.final.then:
// CHECK8-NEXT: store i32 33, i32* [[I]], align 4
// CHECK8-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK8: .omp.final.done:
// CHECK8-NEXT: ret void
//
//
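// .omp_task_entry. is the task proxy for the nowait launch of the l96 region:
// it unpacks the kmp_task_t payload, calls __tgt_target_teams_nowait_mapper
// with no mapped arguments, and runs the host kernel if offloading fails.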
// CHECK8-LABEL: define {{[^@]+}}@.omp_task_entry.
// CHECK8-SAME: (i32 [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias [[TMP1:%.*]]) #[[ATTR5:[0-9]+]] {
// CHECK8-NEXT: entry:
// CHECK8-NEXT: [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 4
// CHECK8-NEXT: [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 4
// CHECK8-NEXT: [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 4
// CHECK8-NEXT: [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 4
// CHECK8-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 4
// CHECK8-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 4
// CHECK8-NEXT: store i32 [[TMP0]], i32* [[DOTADDR]], align 4
// CHECK8-NEXT: store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 4
// CHECK8-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
// CHECK8-NEXT: [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 4
// CHECK8-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0
// CHECK8-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
// CHECK8-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
// CHECK8-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
// CHECK8-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
// CHECK8-NEXT: [[TMP9:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
// CHECK8-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META17:![0-9]+]])
// CHECK8-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META20:![0-9]+]])
// CHECK8-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META22:![0-9]+]])
// CHECK8-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META24:![0-9]+]])
// CHECK8-NEXT: store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !26
// CHECK8-NEXT: store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 4, !noalias !26
// CHECK8-NEXT: store i8* null, i8** [[DOTPRIVATES__ADDR_I]], align 4, !noalias !26
// CHECK8-NEXT: store void (i8*, ...)* null, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !26
// CHECK8-NEXT: store i8* [[TMP9]], i8** [[DOTTASK_T__ADDR_I]], align 4, !noalias !26
// CHECK8-NEXT: store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 4, !noalias !26
// CHECK8-NEXT: [[TMP10:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 4, !noalias !26
// CHECK8-NEXT: [[TMP11:%.*]] = call i32 @__tgt_target_teams_nowait_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 1, i32 0, i32 0, i8* null, i32 0, i8* null) #[[ATTR4]]
// CHECK8-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
// CHECK8-NEXT: br i1 [[TMP12]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]]
// CHECK8: omp_offload.failed.i:
// CHECK8-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96() #[[ATTR4]]
// CHECK8-NEXT: br label [[DOTOMP_OUTLINED__1_EXIT]]
// CHECK8: .omp_outlined..1.exit:
// CHECK8-NEXT: ret i32 0
//
//
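// The l101 kernel below takes 'a' by value and 'k' by reference and forks
// .omp_outlined..2, a dispatch-based loop (__kmpc_dispatch_init_4/next_4)
// that treats 'k' as a linear variable and writes its final value back in
// the .omp.linear.pu block.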
// CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101
// CHECK8-SAME: (i32 [[A:%.*]], i64* nonnull align 4 dereferenceable(8) [[K:%.*]]) #[[ATTR3]] {
// CHECK8-NEXT: entry:
// CHECK8-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[K_ADDR:%.*]] = alloca i64*, align 4
// CHECK8-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK8-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK8-NEXT: store i64* [[K]], i64** [[K_ADDR]], align 4
// CHECK8-NEXT: [[TMP0:%.*]] = load i64*, i64** [[K_ADDR]], align 4
// CHECK8-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK8-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4
// CHECK8-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK8-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i64*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP2]], i64* [[TMP0]])
// CHECK8-NEXT: ret void
//
//
// CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i64* nonnull align 4 dereferenceable(8) [[K:%.*]]) #[[ATTR3]] {
// CHECK8-NEXT: entry:
// CHECK8-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK8-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK8-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[K_ADDR:%.*]] = alloca i64*, align 4
// CHECK8-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTLINEAR_START:%.*]] = alloca i64, align 8
// CHECK8-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[K1:%.*]] = alloca i64, align 8
// CHECK8-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK8-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK8-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK8-NEXT: store i64* [[K]], i64** [[K_ADDR]], align 4
// CHECK8-NEXT: [[TMP0:%.*]] = load i64*, i64** [[K_ADDR]], align 4
// CHECK8-NEXT: [[TMP1:%.*]] = load i64, i64* [[TMP0]], align 8
// CHECK8-NEXT: store i64 [[TMP1]], i64* [[DOTLINEAR_START]], align 8
// CHECK8-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK8-NEXT: store i32 8, i32* [[DOTOMP_UB]], align 4
// CHECK8-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK8-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK8-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK8-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK8-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP3]])
// CHECK8-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 1073741859, i32 0, i32 8, i32 1, i32 1)
// CHECK8-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK8: omp.dispatch.cond:
// CHECK8-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK8-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP4]], 0
// CHECK8-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK8: omp.dispatch.body:
// CHECK8-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK8-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK8-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK8: omp.inner.for.cond:
// CHECK8-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
// CHECK8-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !27
// CHECK8-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK8-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK8: omp.inner.for.body:
// CHECK8-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
// CHECK8-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
// CHECK8-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[MUL]]
// CHECK8-NEXT: store i32 [[SUB]], i32* [[I]], align 4, !llvm.access.group !27
// CHECK8-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !27
// CHECK8-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
// CHECK8-NEXT: [[MUL2:%.*]] = mul nsw i32 [[TMP10]], 3
// CHECK8-NEXT: [[CONV:%.*]] = sext i32 [[MUL2]] to i64
// CHECK8-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP9]], [[CONV]]
// CHECK8-NEXT: store i64 [[ADD]], i64* [[K1]], align 8, !llvm.access.group !27
// CHECK8-NEXT: [[TMP11:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !27
// CHECK8-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK8-NEXT: store i32 [[ADD3]], i32* [[A_ADDR]], align 4, !llvm.access.group !27
// CHECK8-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK8: omp.body.continue:
// CHECK8-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK8: omp.inner.for.inc:
// CHECK8-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
// CHECK8-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK8-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
// CHECK8-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]]
// CHECK8: omp.inner.for.end:
// CHECK8-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK8: omp.dispatch.inc:
// CHECK8-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK8: omp.dispatch.end:
// CHECK8-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK8-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
// CHECK8-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK8: .omp.final.then:
// CHECK8-NEXT: store i32 1, i32* [[I]], align 4
// CHECK8-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK8: .omp.final.done:
// CHECK8-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK8-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0
// CHECK8-NEXT: br i1 [[TMP16]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
// CHECK8: .omp.linear.pu:
// CHECK8-NEXT: [[TMP17:%.*]] = load i64, i64* [[K1]], align 8
// CHECK8-NEXT: store i64 [[TMP17]], i64* [[TMP0]], align 8
// CHECK8-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]]
// CHECK8: .omp.linear.pu.done:
// CHECK8-NEXT: ret void
//
//
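// The l108 kernel below repacks the i16 'aa' plus 'lin' and 'a' into i32
// slots and forks .omp_outlined..3: an unsigned 64-bit static loop
// (__kmpc_for_static_init_8u) with two linear variables stepped by the
// value returned from _Z7get_valv().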
// CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108
// CHECK8-SAME: (i32 [[AA:%.*]], i32 [[LIN:%.*]], i32 [[A:%.*]]) #[[ATTR2]] {
// CHECK8-NEXT: entry:
// CHECK8-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[LIN_ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[LIN_CASTED:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK8-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK8-NEXT: store i32 [[LIN]], i32* [[LIN_ADDR]], align 4
// CHECK8-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK8-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK8-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK8-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK8-NEXT: store i16 [[TMP0]], i16* [[CONV1]], align 2
// CHECK8-NEXT: [[TMP1:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK8-NEXT: [[TMP2:%.*]] = load i32, i32* [[LIN_ADDR]], align 4
// CHECK8-NEXT: store i32 [[TMP2]], i32* [[LIN_CASTED]], align 4
// CHECK8-NEXT: [[TMP3:%.*]] = load i32, i32* [[LIN_CASTED]], align 4
// CHECK8-NEXT: [[TMP4:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK8-NEXT: store i32 [[TMP4]], i32* [[A_CASTED]], align 4
// CHECK8-NEXT: [[TMP5:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK8-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]])
// CHECK8-NEXT: ret void
//
//
// CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[AA:%.*]], i32 [[LIN:%.*]], i32 [[A:%.*]]) #[[ATTR3]] {
// CHECK8-NEXT: entry:
// CHECK8-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK8-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK8-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[LIN_ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK8-NEXT: [[TMP:%.*]] = alloca i64, align 4
// CHECK8-NEXT: [[DOTLINEAR_START:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTLINEAR_START1:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
// CHECK8-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK8-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK8-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK8-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK8-NEXT: [[LIN2:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[A3:%.*]] = alloca i32, align 4
// CHECK8-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK8-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK8-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK8-NEXT: store i32 [[LIN]], i32* [[LIN_ADDR]], align 4
// CHECK8-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK8-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK8-NEXT: [[TMP0:%.*]] = load i32, i32* [[LIN_ADDR]], align 4
// CHECK8-NEXT: store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4
// CHECK8-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK8-NEXT: store i32 [[TMP1]], i32* [[DOTLINEAR_START1]], align 4
// CHECK8-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv()
// CHECK8-NEXT: store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8
// CHECK8-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK8-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK8-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK8-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK8-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK8-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK8-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3]], i32 [[TMP3]])
// CHECK8-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK8-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK8-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3
// CHECK8-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK8: cond.true:
// CHECK8-NEXT: br label [[COND_END:%.*]]
// CHECK8: cond.false:
// CHECK8-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK8-NEXT: br label [[COND_END]]
// CHECK8: cond.end:
// CHECK8-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK8-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK8-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK8-NEXT: store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8
// CHECK8-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK8: omp.inner.for.cond:
// CHECK8-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30
// CHECK8-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !30
// CHECK8-NEXT: [[CMP4:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]]
// CHECK8-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK8: omp.inner.for.body:
// CHECK8-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30
// CHECK8-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400
// CHECK8-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK8-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !30
// CHECK8-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4, !llvm.access.group !30
// CHECK8-NEXT: [[CONV5:%.*]] = sext i32 [[TMP10]] to i64
// CHECK8-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30
// CHECK8-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !30
// CHECK8-NEXT: [[MUL6:%.*]] = mul i64 [[TMP11]], [[TMP12]]
// CHECK8-NEXT: [[ADD:%.*]] = add i64 [[CONV5]], [[MUL6]]
// CHECK8-NEXT: [[CONV7:%.*]] = trunc i64 [[ADD]] to i32
// CHECK8-NEXT: store i32 [[CONV7]], i32* [[LIN2]], align 4, !llvm.access.group !30
// CHECK8-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START1]], align 4, !llvm.access.group !30
// CHECK8-NEXT: [[CONV8:%.*]] = sext i32 [[TMP13]] to i64
// CHECK8-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30
// CHECK8-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !30
// CHECK8-NEXT: [[MUL9:%.*]] = mul i64 [[TMP14]], [[TMP15]]
// CHECK8-NEXT: [[ADD10:%.*]] = add i64 [[CONV8]], [[MUL9]]
// CHECK8-NEXT: [[CONV11:%.*]] = trunc i64 [[ADD10]] to i32
// CHECK8-NEXT: store i32 [[CONV11]], i32* [[A3]], align 4, !llvm.access.group !30
// CHECK8-NEXT: [[TMP16:%.*]] = load i16, i16* [[CONV]], align 4, !llvm.access.group !30
// CHECK8-NEXT: [[CONV12:%.*]] = sext i16 [[TMP16]] to i32
// CHECK8-NEXT: [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1
// CHECK8-NEXT: [[CONV14:%.*]] = trunc i32 [[ADD13]] to i16
// CHECK8-NEXT: store i16 [[CONV14]], i16* [[CONV]], align 4, !llvm.access.group !30
// CHECK8-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK8: omp.body.continue:
// CHECK8-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK8: omp.inner.for.inc:
// CHECK8-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30
// CHECK8-NEXT: [[ADD15:%.*]] = add i64 [[TMP17]], 1
// CHECK8-NEXT: store i64 [[ADD15]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30
// CHECK8-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]]
// CHECK8: omp.inner.for.end:
// CHECK8-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK8: omp.loop.exit:
// CHECK8-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
// CHECK8-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK8-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
// CHECK8-NEXT: br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK8: .omp.final.then:
// CHECK8-NEXT: store i64 400, i64* [[IT]], align 8
// CHECK8-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK8: .omp.final.done:
// CHECK8-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK8-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0
// CHECK8-NEXT: br i1 [[TMP21]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
// CHECK8: .omp.linear.pu:
// CHECK8-NEXT: [[TMP22:%.*]] = load i32, i32* [[LIN2]], align 4
// CHECK8-NEXT: store i32 [[TMP22]], i32* [[LIN_ADDR]], align 4
// CHECK8-NEXT: [[TMP23:%.*]] = load i32, i32* [[A3]], align 4
// CHECK8-NEXT: store i32 [[TMP23]], i32* [[A_ADDR]], align 4
// CHECK8-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]]
// CHECK8: .omp.linear.pu.done:
// CHECK8-NEXT: ret void
//
//
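// The l116 kernel below forwards 'a' and the i16 'aa' to .omp_outlined..4,
// a static loop over an i16 induction variable (it = 6 + 4*iv, finalized
// to 22 on the last chunk).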
// CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116
// CHECK8-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]]) #[[ATTR2]] {
// CHECK8-NEXT: entry:
// CHECK8-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK8-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK8-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK8-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK8-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK8-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4
// CHECK8-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK8-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK8-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK8-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2
// CHECK8-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK8-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]])
// CHECK8-NEXT: ret void
//
//
// CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]]) #[[ATTR3]] {
// CHECK8-NEXT: entry:
// CHECK8-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK8-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK8-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[TMP:%.*]] = alloca i16, align 2
// CHECK8-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[IT:%.*]] = alloca i16, align 2
// CHECK8-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK8-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK8-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK8-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK8-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK8-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK8-NEXT: store i32 3, i32* [[DOTOMP_UB]], align 4
// CHECK8-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK8-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK8-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK8-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK8-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK8-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK8-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3
// CHECK8-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK8: cond.true:
// CHECK8-NEXT: br label [[COND_END:%.*]]
// CHECK8: cond.false:
// CHECK8-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK8-NEXT: br label [[COND_END]]
// CHECK8: cond.end:
// CHECK8-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK8-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK8-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK8-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK8-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK8: omp.inner.for.cond:
// CHECK8-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
// CHECK8-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !33
// CHECK8-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK8-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK8: omp.inner.for.body:
// CHECK8-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
// CHECK8-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4
// CHECK8-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]]
// CHECK8-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD]] to i16
// CHECK8-NEXT: store i16 [[CONV2]], i16* [[IT]], align 2, !llvm.access.group !33
// CHECK8-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !33
// CHECK8-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK8-NEXT: store i32 [[ADD3]], i32* [[A_ADDR]], align 4, !llvm.access.group !33
// CHECK8-NEXT: [[TMP9:%.*]] = load i16, i16* [[CONV]], align 4, !llvm.access.group !33
// CHECK8-NEXT: [[CONV4:%.*]] = sext i16 [[TMP9]] to i32
// CHECK8-NEXT: [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
// CHECK8-NEXT: [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
// CHECK8-NEXT: store i16 [[CONV6]], i16* [[CONV]], align 4, !llvm.access.group !33
// CHECK8-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK8: omp.body.continue:
// CHECK8-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK8: omp.inner.for.inc:
// CHECK8-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
// CHECK8-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK8-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
// CHECK8-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]]
// CHECK8: omp.inner.for.end:
// CHECK8-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK8: omp.loop.exit:
// CHECK8-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK8-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK8-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
// CHECK8-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK8: .omp.final.then:
// CHECK8-NEXT: store i16 22, i16* [[IT]], align 2
// CHECK8-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK8: .omp.final.done:
// CHECK8-NEXT: ret void
//
//
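// The l140 kernel below receives all ten captures of the large target region
// (scalars, fixed arrays, both VLAs with their bounds, and the struct) and
// forks .omp_outlined..7: a chunked static loop (schedule kind 33, chunk size
// from the captured expression) over an i8 induction variable.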
// CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140
// CHECK8-SAME: (i32 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], double* nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 4 dereferenceable(12) [[D:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK8-NEXT: entry:
// CHECK8-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
// CHECK8-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4
// CHECK8-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
// CHECK8-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4
// CHECK8-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
// CHECK8-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK8-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK8-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
// CHECK8-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK8-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4
// CHECK8-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK8-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK8-NEXT: store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
// CHECK8-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4
// CHECK8-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
// CHECK8-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK8-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
// CHECK8-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK8-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
// CHECK8-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK8-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK8-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
// CHECK8-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
// CHECK8-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
// CHECK8-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK8-NEXT: store i32 [[TMP8]], i32* [[A_CASTED]], align 4
// CHECK8-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK8-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK8-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK8-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK8-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x float]*, i32, float*, [5 x [10 x double]]*, i32, i32, double*, %struct.TT*, i32)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP9]], [10 x float]* [[TMP0]], i32 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i32 [[TMP11]])
// CHECK8-NEXT: ret void
//
//
// CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..7
// CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], double* nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 4 dereferenceable(12) [[D:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
// CHECK8-NEXT: entry:
// CHECK8-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK8-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK8-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
// CHECK8-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4
// CHECK8-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
// CHECK8-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4
// CHECK8-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
// CHECK8-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[TMP:%.*]] = alloca i8, align 1
// CHECK8-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[IT:%.*]] = alloca i8, align 1
// CHECK8-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK8-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK8-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK8-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
// CHECK8-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK8-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4
// CHECK8-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK8-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK8-NEXT: store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
// CHECK8-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4
// CHECK8-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
// CHECK8-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK8-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
// CHECK8-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK8-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
// CHECK8-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK8-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK8-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
// CHECK8-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
// CHECK8-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
// CHECK8-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK8-NEXT: store i32 25, i32* [[DOTOMP_UB]], align 4
// CHECK8-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK8-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK8-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK8-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK8-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
// CHECK8-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]])
// CHECK8-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK8: omp.dispatch.cond:
// CHECK8-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK8-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25
// CHECK8-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK8: cond.true:
// CHECK8-NEXT: br label [[COND_END:%.*]]
// CHECK8: cond.false:
// CHECK8-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK8-NEXT: br label [[COND_END]]
// CHECK8: cond.end:
// CHECK8-NEXT: [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK8-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK8-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK8-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// CHECK8-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK8-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK8-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
// CHECK8-NEXT: br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK8: omp.dispatch.body:
// CHECK8-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK8: omp.inner.for.cond:
// CHECK8-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
// CHECK8-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !36
// CHECK8-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
// CHECK8-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK8: omp.inner.for.body:
// CHECK8-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
// CHECK8-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
// CHECK8-NEXT: [[SUB:%.*]] = sub nsw i32 122, [[MUL]]
// CHECK8-NEXT: [[CONV:%.*]] = trunc i32 [[SUB]] to i8
// CHECK8-NEXT: store i8 [[CONV]], i8* [[IT]], align 1, !llvm.access.group !36
// CHECK8-NEXT: [[TMP19:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !36
// CHECK8-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK8-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4, !llvm.access.group !36
// CHECK8-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i32 0, i32 2
// CHECK8-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !36
// CHECK8-NEXT: [[CONV7:%.*]] = fpext float [[TMP20]] to double
// CHECK8-NEXT: [[ADD8:%.*]] = fadd double [[CONV7]], 1.000000e+00
// CHECK8-NEXT: [[CONV9:%.*]] = fptrunc double [[ADD8]] to float
// CHECK8-NEXT: store float [[CONV9]], float* [[ARRAYIDX]], align 4, !llvm.access.group !36
// CHECK8-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[TMP2]], i32 3
// CHECK8-NEXT: [[TMP21:%.*]] = load float, float* [[ARRAYIDX10]], align 4, !llvm.access.group !36
// CHECK8-NEXT: [[CONV11:%.*]] = fpext float [[TMP21]] to double
// CHECK8-NEXT: [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00
// CHECK8-NEXT: [[CONV13:%.*]] = fptrunc double [[ADD12]] to float
// CHECK8-NEXT: store float [[CONV13]], float* [[ARRAYIDX10]], align 4, !llvm.access.group !36
// CHECK8-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i32 0, i32 1
// CHECK8-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX14]], i32 0, i32 2
// CHECK8-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX15]], align 8, !llvm.access.group !36
// CHECK8-NEXT: [[ADD16:%.*]] = fadd double [[TMP22]], 1.000000e+00
// CHECK8-NEXT: store double [[ADD16]], double* [[ARRAYIDX15]], align 8, !llvm.access.group !36
// CHECK8-NEXT: [[TMP23:%.*]] = mul nsw i32 1, [[TMP5]]
// CHECK8-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds double, double* [[TMP6]], i32 [[TMP23]]
// CHECK8-NEXT: [[ARRAYIDX18:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX17]], i32 3
// CHECK8-NEXT: [[TMP24:%.*]] = load double, double* [[ARRAYIDX18]], align 8, !llvm.access.group !36
// CHECK8-NEXT: [[ADD19:%.*]] = fadd double [[TMP24]], 1.000000e+00
// CHECK8-NEXT: store double [[ADD19]], double* [[ARRAYIDX18]], align 8, !llvm.access.group !36
// CHECK8-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
// CHECK8-NEXT: [[TMP25:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group !36
// CHECK8-NEXT: [[ADD20:%.*]] = add nsw i64 [[TMP25]], 1
// CHECK8-NEXT: store i64 [[ADD20]], i64* [[X]], align 4, !llvm.access.group !36
// CHECK8-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
// CHECK8-NEXT: [[TMP26:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group !36
// CHECK8-NEXT: [[CONV21:%.*]] = sext i8 [[TMP26]] to i32
// CHECK8-NEXT: [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1
// CHECK8-NEXT: [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8
// CHECK8-NEXT: store i8 [[CONV23]], i8* [[Y]], align 4, !llvm.access.group !36
// CHECK8-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK8: omp.body.continue:
// CHECK8-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK8: omp.inner.for.inc:
// CHECK8-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
// CHECK8-NEXT: [[ADD24:%.*]] = add nsw i32 [[TMP27]], 1
// CHECK8-NEXT: store i32 [[ADD24]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
// CHECK8-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP37:![0-9]+]]
// CHECK8: omp.inner.for.end:
// CHECK8-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK8: omp.dispatch.inc:
// CHECK8-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK8-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK8-NEXT: [[ADD25:%.*]] = add nsw i32 [[TMP28]], [[TMP29]]
// CHECK8-NEXT: store i32 [[ADD25]], i32* [[DOTOMP_LB]], align 4
// CHECK8-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK8-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK8-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP30]], [[TMP31]]
// CHECK8-NEXT: store i32 [[ADD26]], i32* [[DOTOMP_UB]], align 4
// CHECK8-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK8: omp.dispatch.end:
// CHECK8-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]])
// CHECK8-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK8-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0
// CHECK8-NEXT: br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK8: .omp.final.then:
// CHECK8-NEXT: store i8 96, i8* [[IT]], align 1
// CHECK8-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK8: .omp.final.done:
// CHECK8-NEXT: ret void
//
//
// CHECK8-LABEL: define {{[^@]+}}@_Z3bari
// CHECK8-SAME: (i32 [[N:%.*]]) #[[ATTR0]] {
// CHECK8-NEXT: entry:
// CHECK8-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4
// CHECK8-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK8-NEXT: store i32 0, i32* [[A]], align 4
// CHECK8-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK8-NEXT: [[CALL:%.*]] = call i32 @_Z3fooi(i32 [[TMP0]])
// CHECK8-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK8-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
// CHECK8-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK8-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK8-NEXT: [[CALL1:%.*]] = call i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 4 dereferenceable(8) [[S]], i32 [[TMP2]])
// CHECK8-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK8-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
// CHECK8-NEXT: store i32 [[ADD2]], i32* [[A]], align 4
// CHECK8-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK8-NEXT: [[CALL3:%.*]] = call i32 @_ZL7fstatici(i32 [[TMP4]])
// CHECK8-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4
// CHECK8-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
// CHECK8-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK8-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK8-NEXT: [[CALL5:%.*]] = call i32 @_Z9ftemplateIiET_i(i32 [[TMP6]])
// CHECK8-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK8-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
// CHECK8-NEXT: store i32 [[ADD6]], i32* [[A]], align 4
// CHECK8-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK8-NEXT: ret i32 [[TMP8]]
//
//
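// S1::r1(int) below drives the member-function target region (l214): under an
// if-clause it fills 6-entry map arrays (this with member 'a', 'b', the two
// VLA bounds, the VLA storage, and the captured bool) before calling
// __tgt_target_teams_mapper, with a host fallback on failure or on the else
// branch.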
i32, align 4 12542 // CHECK8-NEXT: [[B:%.*]] = alloca i32, align 4 12543 // CHECK8-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4 12544 // CHECK8-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4 12545 // CHECK8-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1 12546 // CHECK8-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4 12547 // CHECK8-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 12548 // CHECK8-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [6 x i8*], align 4 12549 // CHECK8-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [6 x i8*], align 4 12550 // CHECK8-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [6 x i8*], align 4 12551 // CHECK8-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [6 x i64], align 4 12552 // CHECK8-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 12553 // CHECK8-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 12554 // CHECK8-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 12555 // CHECK8-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 12556 // CHECK8-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 12557 // CHECK8-NEXT: store i32 [[ADD]], i32* [[B]], align 4 12558 // CHECK8-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 12559 // CHECK8-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave() 12560 // CHECK8-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4 12561 // CHECK8-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]] 12562 // CHECK8-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2 12563 // CHECK8-NEXT: store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4 12564 // CHECK8-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 12565 // CHECK8-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 60 12566 // CHECK8-NEXT: [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8 12567 // CHECK8-NEXT: store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_]], align 1 12568 // CHECK8-NEXT: [[TMP5:%.*]] = load i32, i32* [[B]], align 4 12569 // CHECK8-NEXT: store i32 [[TMP5]], i32* [[B_CASTED]], align 4 12570 // CHECK8-NEXT: [[TMP6:%.*]] = load i32, i32* [[B_CASTED]], align 4 12571 // CHECK8-NEXT: [[TMP7:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1 12572 // CHECK8-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP7]] to i1 12573 // CHECK8-NEXT: [[CONV:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__CASTED]] to i8* 12574 // CHECK8-NEXT: [[FROMBOOL2:%.*]] = zext i1 [[TOBOOL]] to i8 12575 // CHECK8-NEXT: store i8 [[FROMBOOL2]], i8* [[CONV]], align 1 12576 // CHECK8-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 12577 // CHECK8-NEXT: [[TMP9:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1 12578 // CHECK8-NEXT: [[TOBOOL3:%.*]] = trunc i8 [[TMP9]] to i1 12579 // CHECK8-NEXT: br i1 [[TOBOOL3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 12580 // CHECK8: omp_if.then: 12581 // CHECK8-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0 12582 // CHECK8-NEXT: [[TMP10:%.*]] = mul nuw i32 2, [[TMP1]] 12583 // CHECK8-NEXT: [[TMP11:%.*]] = mul nuw i32 [[TMP10]], 2 12584 // CHECK8-NEXT: [[TMP12:%.*]] = sext i32 [[TMP11]] to i64 12585 // CHECK8-NEXT: [[TMP13:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 12586 // CHECK8-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to %struct.S1** 12587 // CHECK8-NEXT: store %struct.S1* [[THIS1]], %struct.S1** [[TMP14]], align 4 12588 // CHECK8-NEXT: [[TMP15:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 12589 // CHECK8-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to double** 12590 // CHECK8-NEXT: 
store double* [[A]], double** [[TMP16]], align 4 12591 // CHECK8-NEXT: [[TMP17:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 12592 // CHECK8-NEXT: store i64 8, i64* [[TMP17]], align 4 12593 // CHECK8-NEXT: [[TMP18:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 12594 // CHECK8-NEXT: store i8* null, i8** [[TMP18]], align 4 12595 // CHECK8-NEXT: [[TMP19:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 12596 // CHECK8-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32* 12597 // CHECK8-NEXT: store i32 [[TMP6]], i32* [[TMP20]], align 4 12598 // CHECK8-NEXT: [[TMP21:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 12599 // CHECK8-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i32* 12600 // CHECK8-NEXT: store i32 [[TMP6]], i32* [[TMP22]], align 4 12601 // CHECK8-NEXT: [[TMP23:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 12602 // CHECK8-NEXT: store i64 4, i64* [[TMP23]], align 4 12603 // CHECK8-NEXT: [[TMP24:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 12604 // CHECK8-NEXT: store i8* null, i8** [[TMP24]], align 4 12605 // CHECK8-NEXT: [[TMP25:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 12606 // CHECK8-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* 12607 // CHECK8-NEXT: store i32 2, i32* [[TMP26]], align 4 12608 // CHECK8-NEXT: [[TMP27:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 12609 // CHECK8-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32* 12610 // CHECK8-NEXT: store i32 2, i32* [[TMP28]], align 4 12611 // CHECK8-NEXT: [[TMP29:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 12612 // CHECK8-NEXT: store i64 4, i64* [[TMP29]], align 4 12613 // CHECK8-NEXT: [[TMP30:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 12614 // CHECK8-NEXT: store i8* null, i8** [[TMP30]], align 4 12615 // CHECK8-NEXT: [[TMP31:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 12616 // CHECK8-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i32* 12617 // CHECK8-NEXT: store i32 [[TMP1]], i32* [[TMP32]], align 4 12618 // CHECK8-NEXT: [[TMP33:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 12619 // CHECK8-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32* 12620 // CHECK8-NEXT: store i32 [[TMP1]], i32* [[TMP34]], align 4 12621 // CHECK8-NEXT: [[TMP35:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 12622 // CHECK8-NEXT: store i64 4, i64* [[TMP35]], align 4 12623 // CHECK8-NEXT: [[TMP36:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 12624 // CHECK8-NEXT: store i8* null, i8** [[TMP36]], align 4 12625 // CHECK8-NEXT: [[TMP37:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 12626 // CHECK8-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i16** 12627 // CHECK8-NEXT: store i16* [[VLA]], i16** [[TMP38]], align 4 12628 // CHECK8-NEXT: [[TMP39:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 12629 // CHECK8-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i16** 12630 // CHECK8-NEXT: store i16* [[VLA]], i16** [[TMP40]], align 4 12631 // CHECK8-NEXT: [[TMP41:%.*]] = 
// CHECK8-NEXT: [[TMP41:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
// CHECK8-NEXT: store i64 [[TMP12]], i64* [[TMP41]], align 4
// CHECK8-NEXT: [[TMP42:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4
// CHECK8-NEXT: store i8* null, i8** [[TMP42]], align 4
// CHECK8-NEXT: [[TMP43:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5
// CHECK8-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32*
// CHECK8-NEXT: store i32 [[TMP8]], i32* [[TMP44]], align 4
// CHECK8-NEXT: [[TMP45:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5
// CHECK8-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32*
// CHECK8-NEXT: store i32 [[TMP8]], i32* [[TMP46]], align 4
// CHECK8-NEXT: [[TMP47:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5
// CHECK8-NEXT: store i64 1, i64* [[TMP47]], align 4
// CHECK8-NEXT: [[TMP48:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 5
// CHECK8-NEXT: store i8* null, i8** [[TMP48]], align 4
// CHECK8-NEXT: [[TMP49:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK8-NEXT: [[TMP50:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK8-NEXT: [[TMP51:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK8-NEXT: [[TMP52:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK8-NEXT: [[TOBOOL4:%.*]] = trunc i8 [[TMP52]] to i1
// CHECK8-NEXT: [[TMP53:%.*]] = select i1 [[TOBOOL4]], i32 0, i32 1
// CHECK8-NEXT: [[TMP54:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214.region_id, i32 6, i8** [[TMP49]], i8** [[TMP50]], i64* [[TMP51]], i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 1, i32 [[TMP53]])
// CHECK8-NEXT: [[TMP55:%.*]] = icmp ne i32 [[TMP54]], 0
// CHECK8-NEXT: br i1 [[TMP55]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK8: omp_offload.failed:
// CHECK8-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214(%struct.S1* [[THIS1]], i32 [[TMP6]], i32 2, i32 [[TMP1]], i16* [[VLA]], i32 [[TMP8]]) #[[ATTR4]]
// CHECK8-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK8: omp_offload.cont:
// CHECK8-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK8: omp_if.else:
// CHECK8-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214(%struct.S1* [[THIS1]], i32 [[TMP6]], i32 2, i32 [[TMP1]], i16* [[VLA]], i32 [[TMP8]]) #[[ATTR4]]
// CHECK8-NEXT: br label [[OMP_IF_END]]
// CHECK8: omp_if.end:
// CHECK8-NEXT: [[TMP56:%.*]] = mul nsw i32 1, [[TMP1]]
// CHECK8-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP56]]
// CHECK8-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
// CHECK8-NEXT: [[TMP57:%.*]] = load i16, i16* [[ARRAYIDX5]], align 2
// CHECK8-NEXT: [[CONV6:%.*]] = sext i16 [[TMP57]] to i32
// CHECK8-NEXT: [[TMP58:%.*]] = load i32, i32* [[B]], align 4
// CHECK8-NEXT: [[ADD7:%.*]] = add nsw i32 [[CONV6]], [[TMP58]]
// CHECK8-NEXT: [[TMP59:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK8-NEXT: call void @llvm.stackrestore(i8* [[TMP59]])
// CHECK8-NEXT: ret i32 [[ADD7]]
//
//
// CHECK8-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK8-SAME: (i32 [[N:%.*]]) #[[ATTR0]] {
// CHECK8-NEXT: entry:
// CHECK8-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK8-NEXT: [[AAA:%.*]] = alloca i8, align 1
// CHECK8-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK8-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[AAA_CASTED:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 4
// CHECK8-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 4
// CHECK8-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 4
// CHECK8-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK8-NEXT: store i32 0, i32* [[A]], align 4
// CHECK8-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK8-NEXT: store i8 0, i8* [[AAA]], align 1
// CHECK8-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK8-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4
// CHECK8-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK8-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
// CHECK8-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK8-NEXT: store i16 [[TMP2]], i16* [[CONV]], align 2
// CHECK8-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK8-NEXT: [[TMP4:%.*]] = load i8, i8* [[AAA]], align 1
// CHECK8-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_CASTED]] to i8*
// CHECK8-NEXT: store i8 [[TMP4]], i8* [[CONV1]], align 1
// CHECK8-NEXT: [[TMP5:%.*]] = load i32, i32* [[AAA_CASTED]], align 4
// CHECK8-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK8-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 50
// CHECK8-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK8: omp_if.then:
// CHECK8-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK8-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32*
// CHECK8-NEXT: store i32 [[TMP1]], i32* [[TMP8]], align 4
// CHECK8-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK8-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i32*
// CHECK8-NEXT: store i32 [[TMP1]], i32* [[TMP10]], align 4
// CHECK8-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK8-NEXT: store i8* null, i8** [[TMP11]], align 4
// CHECK8-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK8-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32*
// CHECK8-NEXT: store i32 [[TMP3]], i32* [[TMP13]], align 4
// CHECK8-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK8-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32*
// CHECK8-NEXT: store i32 [[TMP3]], i32* [[TMP15]], align 4
// CHECK8-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK8-NEXT: store i8* null, i8** [[TMP16]], align 4
// CHECK8-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK8-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32*
// CHECK8-NEXT: store i32 [[TMP5]], i32* [[TMP18]], align 4
// CHECK8-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK8-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32*
// CHECK8-NEXT: store i32 [[TMP5]], i32* [[TMP20]], align 4
// CHECK8-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
// CHECK8-NEXT: store i8* null, i8** [[TMP21]], align 4
// CHECK8-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK8-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to [10 x i32]**
// CHECK8-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP23]], align 4
// CHECK8-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK8-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to [10 x i32]**
// CHECK8-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP25]], align 4
// CHECK8-NEXT: [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
// CHECK8-NEXT: store i8* null, i8** [[TMP26]], align 4
// CHECK8-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK8-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK8-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK8-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
// CHECK8-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK8: omp_offload.failed:
// CHECK8-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK8-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK8: omp_offload.cont:
// CHECK8-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK8: omp_if.else:
// CHECK8-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK8-NEXT: br label [[OMP_IF_END]]
// CHECK8: omp_if.end:
// CHECK8-NEXT: [[TMP31:%.*]] = load i32, i32* [[A]], align 4
// CHECK8-NEXT: ret i32 [[TMP31]]
//
//
// CHECK8-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK8-SAME: (i32 [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK8-NEXT: entry:
// CHECK8-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK8-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK8-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4
// CHECK8-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4
// CHECK8-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4
// CHECK8-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK8-NEXT: store i32 0, i32* [[A]], align 4
// CHECK8-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK8-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK8-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4
// CHECK8-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK8-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
// CHECK8-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK8-NEXT: store i16 [[TMP2]], i16* [[CONV]], align 2
// CHECK8-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK8-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK8-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40
// CHECK8-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK8: omp_if.then:
// CHECK8-NEXT: [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK8-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32*
// CHECK8-NEXT: store i32 [[TMP1]], i32* [[TMP6]], align 4
// CHECK8-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK8-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32*
// CHECK8-NEXT: store i32 [[TMP1]], i32* [[TMP8]], align 4
// CHECK8-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK8-NEXT: store i8* null, i8** [[TMP9]], align 4
// CHECK8-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK8-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i32*
// CHECK8-NEXT: store i32 [[TMP3]], i32* [[TMP11]], align 4
// CHECK8-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK8-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32*
// CHECK8-NEXT: store i32 [[TMP3]], i32* [[TMP13]], align 4
// CHECK8-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK8-NEXT: store i8* null, i8** [[TMP14]], align 4
// CHECK8-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK8-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to [10 x i32]**
// CHECK8-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP16]], align 4
// CHECK8-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK8-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [10 x i32]**
// CHECK8-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP18]], align 4
// CHECK8-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
// CHECK8-NEXT: store i8* null, i8** [[TMP19]], align 4
// CHECK8-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK8-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK8-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.15, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK8-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
// CHECK8-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK8: omp_offload.failed:
// CHECK8-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178(i32 [[TMP1]], i32 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK8-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK8: omp_offload.cont:
// CHECK8-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK8: omp_if.else:
// CHECK8-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178(i32 [[TMP1]], i32 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK8-NEXT: br label [[OMP_IF_END]]
// CHECK8: omp_if.end:
// CHECK8-NEXT: [[TMP24:%.*]] = load i32, i32* [[A]], align 4
// CHECK8-NEXT: ret i32 [[TMP24]]
//
//
// CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214
// CHECK8-SAME: (%struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK8-NEXT: entry:
// CHECK8-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK8-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4
// CHECK8-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
// CHECK8-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK8-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4
// CHECK8-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK8-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK8-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4
// CHECK8-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK8-NEXT: [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK8-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK8-NEXT: [[TMP3:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK8-NEXT: [[TMP4:%.*]] = load i16*, i16** [[C_ADDR]], align 4
// CHECK8-NEXT: [[CONV:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__ADDR]] to i8*
// CHECK8-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_ADDR]], align 4
// CHECK8-NEXT: store i32 [[TMP5]], i32* [[B_CASTED]], align 4
// CHECK8-NEXT: [[TMP6:%.*]] = load i32, i32* [[B_CASTED]], align 4
// CHECK8-NEXT: [[TMP7:%.*]] = load i8, i8* [[CONV]], align 4
// CHECK8-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP7]] to i1
// CHECK8-NEXT: [[CONV3:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__CASTED]] to i8*
// CHECK8-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8
// CHECK8-NEXT: store i8 [[FROMBOOL]], i8* [[CONV3]], align 1
// CHECK8-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK8-NEXT: [[TMP9:%.*]] = load i8, i8* [[CONV]], align 4
// CHECK8-NEXT: [[TOBOOL4:%.*]] = trunc i8 [[TMP9]] to i1
// CHECK8-NEXT: br i1 [[TOBOOL4]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK8: omp_if.then:
// CHECK8-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*, i32)* @.omp_outlined..9 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]], i32 [[TMP6]], i32 [[TMP2]], i32 [[TMP3]], i16* [[TMP4]], i32 [[TMP8]])
// CHECK8-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK8: omp_if.else:
// CHECK8-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
// CHECK8-NEXT: store i32 [[TMP0]], i32* [[DOTTHREADID_TEMP_]], align 4
// CHECK8-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4
// CHECK8-NEXT: call void @.omp_outlined..9(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTBOUND_ZERO_ADDR]], %struct.S1* [[TMP1]], i32 [[TMP6]], i32 [[TMP2]], i32 [[TMP3]], i16* [[TMP4]], i32 [[TMP8]]) #[[ATTR4]]
// CHECK8-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
// CHECK8-NEXT: br label [[OMP_IF_END]]
// CHECK8: omp_if.end:
// CHECK8-NEXT: ret void
//
//
// CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..9
// CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
// CHECK8-NEXT: entry:
// CHECK8-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK8-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK8-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK8-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4
// CHECK8-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK8-NEXT: [[TMP:%.*]] = alloca i64, align 4
// CHECK8-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK8-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK8-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK8-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK8-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK8-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK8-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK8-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4
// CHECK8-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK8-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK8-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4
// CHECK8-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK8-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK8-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK8-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK8-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
// CHECK8-NEXT: [[CONV:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__ADDR]] to i8*
// CHECK8-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK8-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK8-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK8-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK8-NEXT: [[TMP4:%.*]] = load i8, i8* [[CONV]], align 4
// CHECK8-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP4]] to i1
// CHECK8-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK8: omp_if.then:
// CHECK8-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK8-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK8-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK8-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK8-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP7]], 3
// CHECK8-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK8: cond.true:
// CHECK8-NEXT: br label [[COND_END:%.*]]
// CHECK8: cond.false:
// CHECK8-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK8-NEXT: br label [[COND_END]]
// CHECK8: cond.end:
// CHECK8-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
// CHECK8-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK8-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK8-NEXT: store i64 [[TMP9]], i64* [[DOTOMP_IV]], align 8
// CHECK8-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK8: omp.inner.for.cond:
// CHECK8-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !39
// CHECK8-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !39
// CHECK8-NEXT: [[CMP3:%.*]] = icmp ule i64 [[TMP10]], [[TMP11]]
// CHECK8-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK8: omp.inner.for.body:
// CHECK8-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !39
// CHECK8-NEXT: [[MUL:%.*]] = mul i64 [[TMP12]], 400
// CHECK8-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK8-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !39
// CHECK8-NEXT: [[TMP13:%.*]] = load i32, i32* [[B_ADDR]], align 4, !llvm.access.group !39
// CHECK8-NEXT: [[CONV4:%.*]] = sitofp i32 [[TMP13]] to double
// CHECK8-NEXT: [[ADD:%.*]] = fadd double [[CONV4]], 1.500000e+00
// CHECK8-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK8-NEXT: store double [[ADD]], double* [[A]], align 4, !nontemporal !40, !llvm.access.group !39
// CHECK8-NEXT: [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK8-NEXT: [[TMP14:%.*]] = load double, double* [[A5]], align 4, !nontemporal !40, !llvm.access.group !39
// CHECK8-NEXT: [[INC:%.*]] = fadd double [[TMP14]], 1.000000e+00
// CHECK8-NEXT: store double [[INC]], double* [[A5]], align 4, !nontemporal !40, !llvm.access.group !39
// CHECK8-NEXT: [[CONV6:%.*]] = fptosi double [[INC]] to i16
// CHECK8-NEXT: [[TMP15:%.*]] = mul nsw i32 1, [[TMP2]]
// CHECK8-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP15]]
// CHECK8-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
// CHECK8-NEXT: store i16 [[CONV6]], i16* [[ARRAYIDX7]], align 2, !llvm.access.group !39
// CHECK8-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK8: omp.body.continue:
// CHECK8-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK8: omp.inner.for.inc:
// CHECK8-NEXT: [[TMP16:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !39
// CHECK8-NEXT: [[ADD8:%.*]] = add i64 [[TMP16]], 1
// CHECK8-NEXT: store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !39
// CHECK8-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP41:![0-9]+]]
// CHECK8: omp.inner.for.end:
// CHECK8-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK8: omp_if.else:
// CHECK8-NEXT: [[TMP17:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK8-NEXT: [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4
// CHECK8-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP18]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK8-NEXT: [[TMP19:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK8-NEXT: [[CMP9:%.*]] = icmp ugt i64 [[TMP19]], 3
// CHECK8-NEXT: br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
// CHECK8: cond.true10:
// CHECK8-NEXT: br label [[COND_END12:%.*]]
// CHECK8: cond.false11:
// CHECK8-NEXT: [[TMP20:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK8-NEXT: br label [[COND_END12]]
// CHECK8: cond.end12:
// CHECK8-NEXT: [[COND13:%.*]] = phi i64 [ 3, [[COND_TRUE10]] ], [ [[TMP20]], [[COND_FALSE11]] ]
// CHECK8-NEXT: store i64 [[COND13]], i64* [[DOTOMP_UB]], align 8
// CHECK8-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK8-NEXT: store i64 [[TMP21]], i64* [[DOTOMP_IV]], align 8
// CHECK8-NEXT: br label [[OMP_INNER_FOR_COND14:%.*]]
// CHECK8: omp.inner.for.cond14:
// CHECK8-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK8-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK8-NEXT: [[CMP15:%.*]] = icmp ule i64 [[TMP22]], [[TMP23]]
// CHECK8-NEXT: br i1 [[CMP15]], label [[OMP_INNER_FOR_BODY16:%.*]], label [[OMP_INNER_FOR_END30:%.*]]
// CHECK8: omp.inner.for.body16:
// CHECK8-NEXT: [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK8-NEXT: [[MUL17:%.*]] = mul i64 [[TMP24]], 400
// CHECK8-NEXT: [[SUB18:%.*]] = sub i64 2000, [[MUL17]]
// CHECK8-NEXT: store i64 [[SUB18]], i64* [[IT]], align 8
// CHECK8-NEXT: [[TMP25:%.*]] = load i32, i32* [[B_ADDR]], align 4
// CHECK8-NEXT: [[CONV19:%.*]] = sitofp i32 [[TMP25]] to double
// CHECK8-NEXT: [[ADD20:%.*]] = fadd double [[CONV19]], 1.500000e+00
// CHECK8-NEXT: [[A21:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK8-NEXT: store double [[ADD20]], double* [[A21]], align 4
// CHECK8-NEXT: [[A22:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK8-NEXT: [[TMP26:%.*]] = load double, double* [[A22]], align 4
// CHECK8-NEXT: [[INC23:%.*]] = fadd double [[TMP26]], 1.000000e+00
// CHECK8-NEXT: store double [[INC23]], double* [[A22]], align 4
// CHECK8-NEXT: [[CONV24:%.*]] = fptosi double [[INC23]] to i16
// CHECK8-NEXT: [[TMP27:%.*]] = mul nsw i32 1, [[TMP2]]
// CHECK8-NEXT: [[ARRAYIDX25:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP27]]
// CHECK8-NEXT: [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX25]], i32 1
// CHECK8-NEXT: store i16 [[CONV24]], i16* [[ARRAYIDX26]], align 2
// CHECK8-NEXT: br label [[OMP_BODY_CONTINUE27:%.*]]
// CHECK8: omp.body.continue27:
// CHECK8-NEXT: br label [[OMP_INNER_FOR_INC28:%.*]]
// CHECK8: omp.inner.for.inc28:
// CHECK8-NEXT: [[TMP28:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK8-NEXT: [[ADD29:%.*]] = add i64 [[TMP28]], 1
// CHECK8-NEXT: store i64 [[ADD29]], i64* [[DOTOMP_IV]], align 8
// CHECK8-NEXT: br label [[OMP_INNER_FOR_COND14]], !llvm.loop [[LOOP43:![0-9]+]]
// CHECK8: omp.inner.for.end30:
// CHECK8-NEXT: br label [[OMP_IF_END]]
// CHECK8: omp_if.end:
// CHECK8-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK8: omp.loop.exit:
// CHECK8-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK8-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
// CHECK8-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
// CHECK8-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK8-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
// CHECK8-NEXT: br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK8: .omp.final.then:
// CHECK8-NEXT: store i64 400, i64* [[IT]], align 8
// CHECK8-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK8: .omp.final.done:
// CHECK8-NEXT: ret void
//
//
// CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195
// CHECK8-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK8-NEXT: entry:
// CHECK8-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK8-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[AAA_CASTED:%.*]] = alloca i32, align 4
// CHECK8-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK8-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK8-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
// CHECK8-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK8-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK8-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
// CHECK8-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK8-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK8-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4
// CHECK8-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK8-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK8-NEXT: [[CONV2:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK8-NEXT: store i16 [[TMP3]], i16* [[CONV2]], align 2
// CHECK8-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK8-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV1]], align 4
// CHECK8-NEXT: [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8*
// CHECK8-NEXT: store i8 [[TMP5]], i8* [[CONV3]], align 1
// CHECK8-NEXT: [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4
// CHECK8-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]])
// CHECK8-NEXT: ret void
//
//
// CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..11
// CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
// CHECK8-NEXT: entry:
// CHECK8-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK8-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK8-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK8-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK8-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK8-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK8-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK8-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK8-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
// CHECK8-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK8-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK8-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
// CHECK8-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK8-NEXT: ret void
//
//
// CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178
// CHECK8-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK8-NEXT: entry:
// CHECK8-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK8-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK8-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK8-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK8-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK8-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK8-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK8-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK8-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4
// CHECK8-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK8-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK8-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK8-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2
// CHECK8-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK8-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]])
// CHECK8-NEXT: ret void
//
//
// CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..14
// CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
// CHECK8-NEXT: entry:
// CHECK8-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK8-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK8-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK8-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK8-NEXT: [[TMP:%.*]] = alloca i64, align 4
// CHECK8-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK8-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK8-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK8-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[I:%.*]] = alloca i64, align 8
// CHECK8-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK8-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK8-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK8-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK8-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK8-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK8-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK8-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK8-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8
// CHECK8-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK8-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK8-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK8-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK8-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK8-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK8-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6
// CHECK8-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK8: cond.true:
// CHECK8-NEXT: br label [[COND_END:%.*]]
// CHECK8: cond.false:
// CHECK8-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK8-NEXT: br label [[COND_END]]
// CHECK8: cond.end:
// CHECK8-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK8-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK8-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK8-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8
// CHECK8-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK8: omp.inner.for.cond:
// CHECK8-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !45
// CHECK8-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !45
// CHECK8-NEXT: [[CMP1:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]]
// CHECK8-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK8: omp.inner.for.body:
// CHECK8-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !45
// CHECK8-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3
// CHECK8-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
// CHECK8-NEXT: store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !45
// CHECK8-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !45
// CHECK8-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK8-NEXT: store i32 [[ADD2]], i32* [[A_ADDR]], align 4, !llvm.access.group !45
// CHECK8-NEXT: [[TMP10:%.*]] = load i16, i16* [[CONV]], align 4, !llvm.access.group !45
// CHECK8-NEXT: [[CONV3:%.*]] = sext i16 [[TMP10]] to i32
// CHECK8-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
// CHECK8-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16
// CHECK8-NEXT: store i16 [[CONV5]], i16* [[CONV]], align 4, !llvm.access.group !45
// CHECK8-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
// CHECK8-NEXT: [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !45
// CHECK8-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK8-NEXT: store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !45
// CHECK8-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK8: omp.body.continue:
// CHECK8-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK8: omp.inner.for.inc:
// CHECK8-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !45
// CHECK8-NEXT: [[ADD7:%.*]] = add nsw i64 [[TMP12]], 1
// CHECK8-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !45
// CHECK8-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP46:![0-9]+]]
// CHECK8: omp.inner.for.end:
// CHECK8-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK8: omp.loop.exit:
// CHECK8-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK8-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK8-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
// CHECK8-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK8: .omp.final.then:
// CHECK8-NEXT: store i64 11, i64* [[I]], align 8
// CHECK8-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK8: .omp.final.done:
// CHECK8-NEXT: ret void
//
//
// CHECK8-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK8-SAME: () #[[ATTR7:[0-9]+]] {
// CHECK8-NEXT: entry:
// CHECK8-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK8-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@_Z7get_valv
// CHECK9-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: ret i64 0
//
//
// CHECK9-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK9-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK9-NEXT: [[B:%.*]] = alloca [10 x float], align 4
// CHECK9-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK9-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK9-NEXT: [[__VLA_EXPR1:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8
// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[K:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[_TMP3:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_LB4:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_UB5:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_IV6:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTLINEAR_START:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[I7:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[K8:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[LIN:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[_TMP20:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[DOTOMP_LB21:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[DOTOMP_UB22:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[DOTOMP_IV23:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[DOTLINEAR_START24:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTLINEAR_START25:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[LIN27:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[A28:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[_TMP49:%.*]] = alloca i16, align 2
// CHECK9-NEXT: [[DOTOMP_LB50:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_UB51:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_IV52:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[IT53:%.*]] = alloca i16, align 2
// CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[_TMP68:%.*]] = alloca i8, align 1
// CHECK9-NEXT: [[DOTOMP_LB69:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_UB70:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_IV71:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[IT72:%.*]] = alloca i8, align 1
// CHECK9-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK9-NEXT: store i32 0, i32* [[A]], align 4
// CHECK9-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK9-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK9-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
// CHECK9-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK9-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
// CHECK9-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4
// CHECK9-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
// CHECK9-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK9-NEXT: [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
// CHECK9-NEXT: [[TMP5:%.*]] = mul nuw i64 5, [[TMP4]]
// CHECK9-NEXT: [[VLA1:%.*]] = alloca double, i64 [[TMP5]], align 8
// CHECK9-NEXT: store i64 [[TMP4]], i64* [[__VLA_EXPR1]], align 8
// CHECK9-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK9-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4
// CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK9-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK9: omp.inner.for.cond:
// CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
// CHECK9-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !2
// CHECK9-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
// CHECK9-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK9: omp.inner.for.body:
// CHECK9-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 5
// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]]
// CHECK9-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !2
// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK9: omp.body.continue:
// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK9: omp.inner.for.inc:
// CHECK9-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
// CHECK9-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK9-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
// CHECK9: omp.inner.for.end:
// CHECK9-NEXT: store i32 33, i32* [[I]], align 4
// CHECK9-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv()
// CHECK9-NEXT: store i64 [[CALL]], i64* [[K]], align 8
// CHECK9-NEXT: store i32 0, i32* [[DOTOMP_LB4]], align 4
// CHECK9-NEXT: store i32 8, i32* [[DOTOMP_UB5]], align 4
// CHECK9-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB4]], align 4
// CHECK9-NEXT: store i32 [[TMP11]], i32* [[DOTOMP_IV6]], align 4
// CHECK9-NEXT: [[TMP12:%.*]] = load i64, i64* [[K]], align 8
// CHECK9-NEXT: store i64 [[TMP12]], i64* [[DOTLINEAR_START]], align 8
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND9:%.*]]
// CHECK9: omp.inner.for.cond9:
// CHECK9-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
// CHECK9-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB5]], align 4, !llvm.access.group !6
// CHECK9-NEXT: [[CMP10:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK9-NEXT: br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END19:%.*]]
// CHECK9: omp.inner.for.body11:
// CHECK9-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
// CHECK9-NEXT: [[MUL12:%.*]] = mul nsw i32 [[TMP15]], 1
// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[MUL12]]
// CHECK9-NEXT: store i32 [[SUB]], i32* [[I7]], align 4, !llvm.access.group !6
// CHECK9-NEXT: [[TMP16:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !6
// CHECK9-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
// CHECK9-NEXT: [[MUL13:%.*]] = mul nsw i32 [[TMP17]], 3
// CHECK9-NEXT: [[CONV:%.*]] = sext i32 [[MUL13]] to i64
// CHECK9-NEXT: [[ADD14:%.*]] = add nsw i64 [[TMP16]], [[CONV]]
// CHECK9-NEXT: store i64 [[ADD14]], i64* [[K8]], align 8, !llvm.access.group !6
// CHECK9-NEXT: [[TMP18:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !6
// CHECK9-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP18]], 1
// CHECK9-NEXT: store i32 [[ADD15]], i32* [[A]], align 4, !llvm.access.group !6
// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE16:%.*]]
// CHECK9: omp.body.continue16:
// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC17:%.*]]
// CHECK9: omp.inner.for.inc17:
// CHECK9-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
// CHECK9-NEXT: [[ADD18:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK9-NEXT: store i32 [[ADD18]], i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP7:![0-9]+]]
// CHECK9: omp.inner.for.end19:
// CHECK9-NEXT: store i32 1, i32* [[I7]], align 4
// CHECK9-NEXT: [[TMP20:%.*]] = load i64, i64* [[K8]], align 8
// CHECK9-NEXT: store i64 [[TMP20]], i64* [[K]], align 8
// CHECK9-NEXT: store i32 12, i32* [[LIN]], align 4
// CHECK9-NEXT: store i64 0, i64* [[DOTOMP_LB21]], align 8
// CHECK9-NEXT: store i64 3, i64* [[DOTOMP_UB22]], align 8
// CHECK9-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTOMP_LB21]], align 8
// CHECK9-NEXT: store i64 [[TMP21]], i64* [[DOTOMP_IV23]], align 8
// CHECK9-NEXT: [[TMP22:%.*]] = load i32, i32* [[LIN]], align 4
// CHECK9-NEXT: store i32 [[TMP22]], i32* [[DOTLINEAR_START24]], align 4
// CHECK9-NEXT: [[TMP23:%.*]] = load i32, i32* [[A]], align 4
// CHECK9-NEXT: store i32 [[TMP23]], i32* [[DOTLINEAR_START25]], align 4
// CHECK9-NEXT: [[CALL26:%.*]] = call i64 @_Z7get_valv()
// CHECK9-NEXT: store i64 [[CALL26]], i64* [[DOTLINEAR_STEP]], align 8
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND29:%.*]]
// CHECK9: omp.inner.for.cond29:
// CHECK9-NEXT: [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK9-NEXT: [[TMP25:%.*]] = load i64, i64* [[DOTOMP_UB22]], align 8, !llvm.access.group !9
// CHECK9-NEXT: [[CMP30:%.*]] = icmp ule i64 [[TMP24]], [[TMP25]]
// CHECK9-NEXT: br i1 [[CMP30]], label [[OMP_INNER_FOR_BODY31:%.*]], label [[OMP_INNER_FOR_END48:%.*]]
// CHECK9: omp.inner.for.body31:
// CHECK9-NEXT: [[TMP26:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK9-NEXT: [[MUL32:%.*]] = mul i64 [[TMP26]], 400
// CHECK9-NEXT: [[SUB33:%.*]] = sub i64 2000, [[MUL32]]
// CHECK9-NEXT: store i64 [[SUB33]], i64* [[IT]], align 8, !llvm.access.group !9
// CHECK9-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTLINEAR_START24]], align 4, !llvm.access.group !9
// CHECK9-NEXT: [[CONV34:%.*]] = sext i32 [[TMP27]] to i64
// CHECK9-NEXT: [[TMP28:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK9-NEXT: [[TMP29:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !9
// CHECK9-NEXT: [[MUL35:%.*]] = mul i64 [[TMP28]], [[TMP29]]
// CHECK9-NEXT: [[ADD36:%.*]] = add i64 [[CONV34]], [[MUL35]]
// CHECK9-NEXT: [[CONV37:%.*]] = trunc i64 [[ADD36]] to i32
// CHECK9-NEXT: store i32 [[CONV37]], i32* [[LIN27]], align 4, !llvm.access.group !9
// CHECK9-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTLINEAR_START25]], align 4, !llvm.access.group !9
// CHECK9-NEXT: [[CONV38:%.*]] = sext i32 [[TMP30]] to i64
// CHECK9-NEXT: [[TMP31:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK9-NEXT: [[TMP32:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !9
// CHECK9-NEXT: [[MUL39:%.*]] = mul i64 [[TMP31]], [[TMP32]]
// CHECK9-NEXT: [[ADD40:%.*]] = add i64 [[CONV38]], [[MUL39]]
// CHECK9-NEXT: [[CONV41:%.*]] = trunc i64 [[ADD40]] to i32
// CHECK9-NEXT: store i32 [[CONV41]], i32* [[A28]], align 4, !llvm.access.group !9
// CHECK9-NEXT: [[TMP33:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !9
// CHECK9-NEXT: [[CONV42:%.*]] = sext i16 [[TMP33]] to i32
// CHECK9-NEXT: [[ADD43:%.*]] = add nsw i32 [[CONV42]], 1
// CHECK9-NEXT: [[CONV44:%.*]] = trunc i32 [[ADD43]] to i16
// CHECK9-NEXT: store i16 [[CONV44]], i16* [[AA]], align 2, !llvm.access.group !9
// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE45:%.*]]
// CHECK9: omp.body.continue45:
// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC46:%.*]]
// CHECK9: omp.inner.for.inc46:
// CHECK9-NEXT: [[TMP34:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK9-NEXT: [[ADD47:%.*]] = add i64 [[TMP34]], 1
// CHECK9-NEXT: store i64 [[ADD47]], i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP10:![0-9]+]]
// CHECK9: omp.inner.for.end48:
// CHECK9-NEXT: store i64 400, i64* [[IT]], align 8
// CHECK9-NEXT: [[TMP35:%.*]] = load i32, i32* [[LIN27]], align 4
// CHECK9-NEXT: store i32 [[TMP35]], i32* [[LIN]], align 4
// CHECK9-NEXT: [[TMP36:%.*]] = load i32, i32* [[A28]], align 4
// CHECK9-NEXT: store i32 [[TMP36]], i32* [[A]], align 4
// CHECK9-NEXT: store i32 0, i32* [[DOTOMP_LB50]], align 4
// CHECK9-NEXT: store i32 3, i32* [[DOTOMP_UB51]], align 4
// CHECK9-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTOMP_LB50]], align 4
// CHECK9-NEXT: store i32 [[TMP37]], i32* [[DOTOMP_IV52]], align 4
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND54:%.*]]
// CHECK9: omp.inner.for.cond54:
// CHECK9-NEXT: [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
// CHECK9-NEXT: [[TMP39:%.*]] = load i32, i32* [[DOTOMP_UB51]], align 4, !llvm.access.group !12
// CHECK9-NEXT: [[CMP55:%.*]] = icmp sle i32 [[TMP38]], [[TMP39]]
// CHECK9-NEXT: br i1 [[CMP55]], label [[OMP_INNER_FOR_BODY56:%.*]], label [[OMP_INNER_FOR_END67:%.*]]
// CHECK9: omp.inner.for.body56:
// CHECK9-NEXT: [[TMP40:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
// CHECK9-NEXT: [[MUL57:%.*]] = mul nsw i32 [[TMP40]], 4
// CHECK9-NEXT: [[ADD58:%.*]] = add nsw i32 6, [[MUL57]]
// CHECK9-NEXT: [[CONV59:%.*]] = trunc i32 [[ADD58]] to i16
// CHECK9-NEXT: store i16 [[CONV59]], i16* [[IT53]], align 2, !llvm.access.group !12
// CHECK9-NEXT: [[TMP41:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !12
// CHECK9-NEXT: [[ADD60:%.*]] = add nsw i32 [[TMP41]], 1
// CHECK9-NEXT: store i32 [[ADD60]], i32* [[A]], align 4, !llvm.access.group !12
// CHECK9-NEXT: [[TMP42:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !12
// CHECK9-NEXT: [[CONV61:%.*]] = sext i16 [[TMP42]] to i32
// CHECK9-NEXT: [[ADD62:%.*]] = add nsw i32 [[CONV61]], 1
// CHECK9-NEXT: [[CONV63:%.*]] = trunc i32 [[ADD62]] to i16
// CHECK9-NEXT: store i16 [[CONV63]], i16* [[AA]], align 2, !llvm.access.group !12
// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE64:%.*]]
// CHECK9: omp.body.continue64:
// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC65:%.*]]
// CHECK9: omp.inner.for.inc65:
// CHECK9-NEXT: [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
// CHECK9-NEXT: [[ADD66:%.*]] = add nsw i32 [[TMP43]], 1
// CHECK9-NEXT: store i32 [[ADD66]], i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND54]], !llvm.loop [[LOOP13:![0-9]+]]
// CHECK9: omp.inner.for.end67:
// CHECK9-NEXT: store i16 22, i16* [[IT53]], align 2
// CHECK9-NEXT: [[TMP44:%.*]] = load i32, i32* [[A]], align 4
// CHECK9-NEXT: store i32 [[TMP44]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK9-NEXT: store i32 0, i32* [[DOTOMP_LB69]], align 4
// CHECK9-NEXT: store i32 25, i32* [[DOTOMP_UB70]], align 4
// CHECK9-NEXT: [[TMP45:%.*]] = load i32, i32* [[DOTOMP_LB69]], align 4
// CHECK9-NEXT: store i32 [[TMP45]], i32* [[DOTOMP_IV71]], align 4
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND73:%.*]]
// CHECK9: omp.inner.for.cond73:
// CHECK9-NEXT: [[TMP46:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15
// CHECK9-NEXT: [[TMP47:%.*]] = load i32, i32* [[DOTOMP_UB70]], align 4, !llvm.access.group !15
// CHECK9-NEXT: [[CMP74:%.*]] = icmp sle i32 [[TMP46]], [[TMP47]]
// CHECK9-NEXT: br i1 [[CMP74]], label [[OMP_INNER_FOR_BODY75:%.*]], label [[OMP_INNER_FOR_END100:%.*]]
// CHECK9: omp.inner.for.body75:
// CHECK9-NEXT: [[TMP48:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15
// CHECK9-NEXT: [[MUL76:%.*]] = mul nsw i32 [[TMP48]], 1
// CHECK9-NEXT: [[SUB77:%.*]] = sub nsw i32 122, [[MUL76]]
// CHECK9-NEXT: [[CONV78:%.*]] = trunc i32 [[SUB77]] to i8
// CHECK9-NEXT: store i8 [[CONV78]], i8* [[IT72]], align 1, !llvm.access.group !15
// CHECK9-NEXT: [[TMP49:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !15
// CHECK9-NEXT: [[ADD79:%.*]] = add nsw i32 [[TMP49]], 1
// CHECK9-NEXT: store i32 [[ADD79]], i32* [[A]], align 4, !llvm.access.group !15
// CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i64 0, i64 2
// CHECK9-NEXT: [[TMP50:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !15
// CHECK9-NEXT: [[CONV80:%.*]] = fpext float [[TMP50]] to double
// CHECK9-NEXT: [[ADD81:%.*]] = fadd double [[CONV80]], 1.000000e+00
// CHECK9-NEXT: [[CONV82:%.*]] = fptrunc double [[ADD81]] to float
// CHECK9-NEXT: store float [[CONV82]], float* [[ARRAYIDX]], align 4, !llvm.access.group !15
// CHECK9-NEXT: [[ARRAYIDX83:%.*]] = getelementptr inbounds float, float* [[VLA]], i64 3
// CHECK9-NEXT: [[TMP51:%.*]] = load float, float* [[ARRAYIDX83]], align 4, !llvm.access.group !15
// CHECK9-NEXT: [[CONV84:%.*]] = fpext float [[TMP51]] to double
// CHECK9-NEXT: [[ADD85:%.*]] = fadd double [[CONV84]], 1.000000e+00
[[ADD85]] to float 13463 // CHECK9-NEXT: store float [[CONV86]], float* [[ARRAYIDX83]], align 4, !llvm.access.group !15 13464 // CHECK9-NEXT: [[ARRAYIDX87:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i64 0, i64 1 13465 // CHECK9-NEXT: [[ARRAYIDX88:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX87]], i64 0, i64 2 13466 // CHECK9-NEXT: [[TMP52:%.*]] = load double, double* [[ARRAYIDX88]], align 8, !llvm.access.group !15 13467 // CHECK9-NEXT: [[ADD89:%.*]] = fadd double [[TMP52]], 1.000000e+00 13468 // CHECK9-NEXT: store double [[ADD89]], double* [[ARRAYIDX88]], align 8, !llvm.access.group !15 13469 // CHECK9-NEXT: [[TMP53:%.*]] = mul nsw i64 1, [[TMP4]] 13470 // CHECK9-NEXT: [[ARRAYIDX90:%.*]] = getelementptr inbounds double, double* [[VLA1]], i64 [[TMP53]] 13471 // CHECK9-NEXT: [[ARRAYIDX91:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX90]], i64 3 13472 // CHECK9-NEXT: [[TMP54:%.*]] = load double, double* [[ARRAYIDX91]], align 8, !llvm.access.group !15 13473 // CHECK9-NEXT: [[ADD92:%.*]] = fadd double [[TMP54]], 1.000000e+00 13474 // CHECK9-NEXT: store double [[ADD92]], double* [[ARRAYIDX91]], align 8, !llvm.access.group !15 13475 // CHECK9-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0 13476 // CHECK9-NEXT: [[TMP55:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group !15 13477 // CHECK9-NEXT: [[ADD93:%.*]] = add nsw i64 [[TMP55]], 1 13478 // CHECK9-NEXT: store i64 [[ADD93]], i64* [[X]], align 8, !llvm.access.group !15 13479 // CHECK9-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1 13480 // CHECK9-NEXT: [[TMP56:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group !15 13481 // CHECK9-NEXT: [[CONV94:%.*]] = sext i8 [[TMP56]] to i32 13482 // CHECK9-NEXT: [[ADD95:%.*]] = add nsw i32 [[CONV94]], 1 13483 // CHECK9-NEXT: [[CONV96:%.*]] = trunc i32 [[ADD95]] to i8 13484 // CHECK9-NEXT: store i8 [[CONV96]], i8* [[Y]], align 8, !llvm.access.group !15 13485 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE97:%.*]] 13486 // CHECK9: omp.body.continue97: 13487 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC98:%.*]] 13488 // CHECK9: omp.inner.for.inc98: 13489 // CHECK9-NEXT: [[TMP57:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15 13490 // CHECK9-NEXT: [[ADD99:%.*]] = add nsw i32 [[TMP57]], 1 13491 // CHECK9-NEXT: store i32 [[ADD99]], i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15 13492 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND73]], !llvm.loop [[LOOP16:![0-9]+]] 13493 // CHECK9: omp.inner.for.end100: 13494 // CHECK9-NEXT: store i8 96, i8* [[IT72]], align 1 13495 // CHECK9-NEXT: [[TMP58:%.*]] = load i32, i32* [[A]], align 4 13496 // CHECK9-NEXT: [[TMP59:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 13497 // CHECK9-NEXT: call void @llvm.stackrestore(i8* [[TMP59]]) 13498 // CHECK9-NEXT: ret i32 [[TMP58]] 13499 // 13500 // 13501 // CHECK9-LABEL: define {{[^@]+}}@_Z3bari 13502 // CHECK9-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] { 13503 // CHECK9-NEXT: entry: 13504 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 13505 // CHECK9-NEXT: [[A:%.*]] = alloca i32, align 4 13506 // CHECK9-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8 13507 // CHECK9-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 13508 // CHECK9-NEXT: store i32 0, i32* [[A]], align 4 13509 // CHECK9-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 13510 // CHECK9-NEXT: [[CALL:%.*]] = call signext i32 @_Z3fooi(i32 signext [[TMP0]]) 13511 // CHECK9-NEXT: 
[[TMP1:%.*]] = load i32, i32* [[A]], align 4 13512 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] 13513 // CHECK9-NEXT: store i32 [[ADD]], i32* [[A]], align 4 13514 // CHECK9-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 13515 // CHECK9-NEXT: [[CALL1:%.*]] = call signext i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 8 dereferenceable(8) [[S]], i32 signext [[TMP2]]) 13516 // CHECK9-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 13517 // CHECK9-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] 13518 // CHECK9-NEXT: store i32 [[ADD2]], i32* [[A]], align 4 13519 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 13520 // CHECK9-NEXT: [[CALL3:%.*]] = call signext i32 @_ZL7fstatici(i32 signext [[TMP4]]) 13521 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4 13522 // CHECK9-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] 13523 // CHECK9-NEXT: store i32 [[ADD4]], i32* [[A]], align 4 13524 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4 13525 // CHECK9-NEXT: [[CALL5:%.*]] = call signext i32 @_Z9ftemplateIiET_i(i32 signext [[TMP6]]) 13526 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4 13527 // CHECK9-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]] 13528 // CHECK9-NEXT: store i32 [[ADD6]], i32* [[A]], align 4 13529 // CHECK9-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4 13530 // CHECK9-NEXT: ret i32 [[TMP8]] 13531 // 13532 // 13533 // CHECK9-LABEL: define {{[^@]+}}@_ZN2S12r1Ei 13534 // CHECK9-SAME: (%struct.S1* nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 signext [[N:%.*]]) #[[ATTR0]] comdat align 2 { 13535 // CHECK9-NEXT: entry: 13536 // CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 13537 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 13538 // CHECK9-NEXT: [[B:%.*]] = alloca i32, align 4 13539 // CHECK9-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8 13540 // CHECK9-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8 13541 // CHECK9-NEXT: [[TMP:%.*]] = alloca i64, align 8 13542 // CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 13543 // CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 13544 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 13545 // CHECK9-NEXT: [[IT:%.*]] = alloca i64, align 8 13546 // CHECK9-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 13547 // CHECK9-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 13548 // CHECK9-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 13549 // CHECK9-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 13550 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 13551 // CHECK9-NEXT: store i32 [[ADD]], i32* [[B]], align 4 13552 // CHECK9-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 13553 // CHECK9-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 13554 // CHECK9-NEXT: [[TMP3:%.*]] = call i8* @llvm.stacksave() 13555 // CHECK9-NEXT: store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8 13556 // CHECK9-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]] 13557 // CHECK9-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2 13558 // CHECK9-NEXT: store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8 13559 // CHECK9-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 13560 // CHECK9-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8 13561 // CHECK9-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 13562 // CHECK9-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8 13563 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 13564 // CHECK9: omp.inner.for.cond: 13565 // 
CHECK9-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18 13566 // CHECK9-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !18 13567 // CHECK9-NEXT: [[CMP:%.*]] = icmp ule i64 [[TMP6]], [[TMP7]] 13568 // CHECK9-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 13569 // CHECK9: omp.inner.for.body: 13570 // CHECK9-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18 13571 // CHECK9-NEXT: [[MUL:%.*]] = mul i64 [[TMP8]], 400 13572 // CHECK9-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] 13573 // CHECK9-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !18 13574 // CHECK9-NEXT: [[TMP9:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group !18 13575 // CHECK9-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP9]] to double 13576 // CHECK9-NEXT: [[ADD2:%.*]] = fadd double [[CONV]], 1.500000e+00 13577 // CHECK9-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0 13578 // CHECK9-NEXT: store double [[ADD2]], double* [[A]], align 8, !llvm.access.group !18 13579 // CHECK9-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 13580 // CHECK9-NEXT: [[TMP10:%.*]] = load double, double* [[A3]], align 8, !llvm.access.group !18 13581 // CHECK9-NEXT: [[INC:%.*]] = fadd double [[TMP10]], 1.000000e+00 13582 // CHECK9-NEXT: store double [[INC]], double* [[A3]], align 8, !llvm.access.group !18 13583 // CHECK9-NEXT: [[CONV4:%.*]] = fptosi double [[INC]] to i16 13584 // CHECK9-NEXT: [[TMP11:%.*]] = mul nsw i64 1, [[TMP2]] 13585 // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP11]] 13586 // CHECK9-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1 13587 // CHECK9-NEXT: store i16 [[CONV4]], i16* [[ARRAYIDX5]], align 2, !llvm.access.group !18 13588 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 13589 // CHECK9: omp.body.continue: 13590 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 13591 // CHECK9: omp.inner.for.inc: 13592 // CHECK9-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18 13593 // CHECK9-NEXT: [[ADD6:%.*]] = add i64 [[TMP12]], 1 13594 // CHECK9-NEXT: store i64 [[ADD6]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18 13595 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]] 13596 // CHECK9: omp.inner.for.end: 13597 // CHECK9-NEXT: store i64 400, i64* [[IT]], align 8 13598 // CHECK9-NEXT: [[TMP13:%.*]] = mul nsw i64 1, [[TMP2]] 13599 // CHECK9-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP13]] 13600 // CHECK9-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX7]], i64 1 13601 // CHECK9-NEXT: [[TMP14:%.*]] = load i16, i16* [[ARRAYIDX8]], align 2 13602 // CHECK9-NEXT: [[CONV9:%.*]] = sext i16 [[TMP14]] to i32 13603 // CHECK9-NEXT: [[TMP15:%.*]] = load i32, i32* [[B]], align 4 13604 // CHECK9-NEXT: [[ADD10:%.*]] = add nsw i32 [[CONV9]], [[TMP15]] 13605 // CHECK9-NEXT: [[TMP16:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 13606 // CHECK9-NEXT: call void @llvm.stackrestore(i8* [[TMP16]]) 13607 // CHECK9-NEXT: ret i32 [[ADD10]] 13608 // 13609 // 13610 // CHECK9-LABEL: define {{[^@]+}}@_ZL7fstatici 13611 // CHECK9-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] { 13612 // CHECK9-NEXT: entry: 13613 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 13614 // CHECK9-NEXT: [[A:%.*]] = alloca i32, align 4 13615 // CHECK9-NEXT: 
[[AA:%.*]] = alloca i16, align 2 13616 // CHECK9-NEXT: [[AAA:%.*]] = alloca i8, align 1 13617 // CHECK9-NEXT: [[B:%.*]] = alloca [10 x i32], align 4 13618 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 13619 // CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 13620 // CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 13621 // CHECK9-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 13622 // CHECK9-NEXT: store i32 0, i32* [[A]], align 4 13623 // CHECK9-NEXT: store i16 0, i16* [[AA]], align 2 13624 // CHECK9-NEXT: store i8 0, i8* [[AAA]], align 1 13625 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 13626 // CHECK9-NEXT: store i32 429496720, i32* [[DOTOMP_UB]], align 4 13627 // CHECK9-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4 13628 // CHECK9-NEXT: ret i32 [[TMP0]] 13629 // 13630 // 13631 // CHECK9-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i 13632 // CHECK9-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] comdat { 13633 // CHECK9-NEXT: entry: 13634 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 13635 // CHECK9-NEXT: [[A:%.*]] = alloca i32, align 4 13636 // CHECK9-NEXT: [[AA:%.*]] = alloca i16, align 2 13637 // CHECK9-NEXT: [[B:%.*]] = alloca [10 x i32], align 4 13638 // CHECK9-NEXT: [[TMP:%.*]] = alloca i64, align 8 13639 // CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 13640 // CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 13641 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 13642 // CHECK9-NEXT: [[I:%.*]] = alloca i64, align 8 13643 // CHECK9-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 13644 // CHECK9-NEXT: store i32 0, i32* [[A]], align 4 13645 // CHECK9-NEXT: store i16 0, i16* [[AA]], align 2 13646 // CHECK9-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 13647 // CHECK9-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8 13648 // CHECK9-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 13649 // CHECK9-NEXT: store i64 [[TMP0]], i64* [[DOTOMP_IV]], align 8 13650 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 13651 // CHECK9: omp.inner.for.cond: 13652 // CHECK9-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !21 13653 // CHECK9-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !21 13654 // CHECK9-NEXT: [[CMP:%.*]] = icmp sle i64 [[TMP1]], [[TMP2]] 13655 // CHECK9-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 13656 // CHECK9: omp.inner.for.body: 13657 // CHECK9-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !21 13658 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP3]], 3 13659 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]] 13660 // CHECK9-NEXT: store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !21 13661 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !21 13662 // CHECK9-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 13663 // CHECK9-NEXT: store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group !21 13664 // CHECK9-NEXT: [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !21 13665 // CHECK9-NEXT: [[CONV:%.*]] = sext i16 [[TMP5]] to i32 13666 // CHECK9-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1 13667 // CHECK9-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16 13668 // CHECK9-NEXT: store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group !21 13669 // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i64 0, i64 2 13670 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group 
!21 13671 // CHECK9-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1 13672 // CHECK9-NEXT: store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !21 13673 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 13674 // CHECK9: omp.body.continue: 13675 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 13676 // CHECK9: omp.inner.for.inc: 13677 // CHECK9-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !21 13678 // CHECK9-NEXT: [[ADD5:%.*]] = add nsw i64 [[TMP7]], 1 13679 // CHECK9-NEXT: store i64 [[ADD5]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !21 13680 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]] 13681 // CHECK9: omp.inner.for.end: 13682 // CHECK9-NEXT: store i64 11, i64* [[I]], align 8 13683 // CHECK9-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4 13684 // CHECK9-NEXT: ret i32 [[TMP8]] 13685 // 13686 // 13687 // CHECK10-LABEL: define {{[^@]+}}@_Z7get_valv 13688 // CHECK10-SAME: () #[[ATTR0:[0-9]+]] { 13689 // CHECK10-NEXT: entry: 13690 // CHECK10-NEXT: ret i64 0 13691 // 13692 // 13693 // CHECK10-LABEL: define {{[^@]+}}@_Z3fooi 13694 // CHECK10-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] { 13695 // CHECK10-NEXT: entry: 13696 // CHECK10-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 13697 // CHECK10-NEXT: [[A:%.*]] = alloca i32, align 4 13698 // CHECK10-NEXT: [[AA:%.*]] = alloca i16, align 2 13699 // CHECK10-NEXT: [[B:%.*]] = alloca [10 x float], align 4 13700 // CHECK10-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8 13701 // CHECK10-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8 13702 // CHECK10-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8 13703 // CHECK10-NEXT: [[__VLA_EXPR1:%.*]] = alloca i64, align 8 13704 // CHECK10-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8 13705 // CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4 13706 // CHECK10-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 13707 // CHECK10-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 13708 // CHECK10-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 13709 // CHECK10-NEXT: [[I:%.*]] = alloca i32, align 4 13710 // CHECK10-NEXT: [[K:%.*]] = alloca i64, align 8 13711 // CHECK10-NEXT: [[_TMP3:%.*]] = alloca i32, align 4 13712 // CHECK10-NEXT: [[DOTOMP_LB4:%.*]] = alloca i32, align 4 13713 // CHECK10-NEXT: [[DOTOMP_UB5:%.*]] = alloca i32, align 4 13714 // CHECK10-NEXT: [[DOTOMP_IV6:%.*]] = alloca i32, align 4 13715 // CHECK10-NEXT: [[DOTLINEAR_START:%.*]] = alloca i64, align 8 13716 // CHECK10-NEXT: [[I7:%.*]] = alloca i32, align 4 13717 // CHECK10-NEXT: [[K8:%.*]] = alloca i64, align 8 13718 // CHECK10-NEXT: [[LIN:%.*]] = alloca i32, align 4 13719 // CHECK10-NEXT: [[_TMP20:%.*]] = alloca i64, align 8 13720 // CHECK10-NEXT: [[DOTOMP_LB21:%.*]] = alloca i64, align 8 13721 // CHECK10-NEXT: [[DOTOMP_UB22:%.*]] = alloca i64, align 8 13722 // CHECK10-NEXT: [[DOTOMP_IV23:%.*]] = alloca i64, align 8 13723 // CHECK10-NEXT: [[DOTLINEAR_START24:%.*]] = alloca i32, align 4 13724 // CHECK10-NEXT: [[DOTLINEAR_START25:%.*]] = alloca i32, align 4 13725 // CHECK10-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8 13726 // CHECK10-NEXT: [[IT:%.*]] = alloca i64, align 8 13727 // CHECK10-NEXT: [[LIN27:%.*]] = alloca i32, align 4 13728 // CHECK10-NEXT: [[A28:%.*]] = alloca i32, align 4 13729 // CHECK10-NEXT: [[_TMP49:%.*]] = alloca i16, align 2 13730 // CHECK10-NEXT: [[DOTOMP_LB50:%.*]] = alloca i32, align 4 13731 // CHECK10-NEXT: [[DOTOMP_UB51:%.*]] = alloca i32, align 4 13732 // CHECK10-NEXT: [[DOTOMP_IV52:%.*]] = alloca i32, align 4 13733 // CHECK10-NEXT: 
[[IT53:%.*]] = alloca i16, align 2 13734 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 13735 // CHECK10-NEXT: [[_TMP68:%.*]] = alloca i8, align 1 13736 // CHECK10-NEXT: [[DOTOMP_LB69:%.*]] = alloca i32, align 4 13737 // CHECK10-NEXT: [[DOTOMP_UB70:%.*]] = alloca i32, align 4 13738 // CHECK10-NEXT: [[DOTOMP_IV71:%.*]] = alloca i32, align 4 13739 // CHECK10-NEXT: [[IT72:%.*]] = alloca i8, align 1 13740 // CHECK10-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 13741 // CHECK10-NEXT: store i32 0, i32* [[A]], align 4 13742 // CHECK10-NEXT: store i16 0, i16* [[AA]], align 2 13743 // CHECK10-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 13744 // CHECK10-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 13745 // CHECK10-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave() 13746 // CHECK10-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8 13747 // CHECK10-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4 13748 // CHECK10-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8 13749 // CHECK10-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4 13750 // CHECK10-NEXT: [[TMP4:%.*]] = zext i32 [[TMP3]] to i64 13751 // CHECK10-NEXT: [[TMP5:%.*]] = mul nuw i64 5, [[TMP4]] 13752 // CHECK10-NEXT: [[VLA1:%.*]] = alloca double, i64 [[TMP5]], align 8 13753 // CHECK10-NEXT: store i64 [[TMP4]], i64* [[__VLA_EXPR1]], align 8 13754 // CHECK10-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 13755 // CHECK10-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4 13756 // CHECK10-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 13757 // CHECK10-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 13758 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 13759 // CHECK10: omp.inner.for.cond: 13760 // CHECK10-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 13761 // CHECK10-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !2 13762 // CHECK10-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 13763 // CHECK10-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 13764 // CHECK10: omp.inner.for.body: 13765 // CHECK10-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 13766 // CHECK10-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 5 13767 // CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]] 13768 // CHECK10-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !2 13769 // CHECK10-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 13770 // CHECK10: omp.body.continue: 13771 // CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 13772 // CHECK10: omp.inner.for.inc: 13773 // CHECK10-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 13774 // CHECK10-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], 1 13775 // CHECK10-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 13776 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] 13777 // CHECK10: omp.inner.for.end: 13778 // CHECK10-NEXT: store i32 33, i32* [[I]], align 4 13779 // CHECK10-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv() 13780 // CHECK10-NEXT: store i64 [[CALL]], i64* [[K]], align 8 13781 // CHECK10-NEXT: store i32 0, i32* [[DOTOMP_LB4]], align 4 13782 // CHECK10-NEXT: store i32 8, i32* [[DOTOMP_UB5]], align 4 13783 // CHECK10-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB4]], align 4 13784 // CHECK10-NEXT: store i32 [[TMP11]], i32* [[DOTOMP_IV6]], align 4 13785 // CHECK10-NEXT: [[TMP12:%.*]] = load i64, i64* [[K]], align 8 13786 
// CHECK10-NEXT: store i64 [[TMP12]], i64* [[DOTLINEAR_START]], align 8 13787 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND9:%.*]] 13788 // CHECK10: omp.inner.for.cond9: 13789 // CHECK10-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6 13790 // CHECK10-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB5]], align 4, !llvm.access.group !6 13791 // CHECK10-NEXT: [[CMP10:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]] 13792 // CHECK10-NEXT: br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END19:%.*]] 13793 // CHECK10: omp.inner.for.body11: 13794 // CHECK10-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6 13795 // CHECK10-NEXT: [[MUL12:%.*]] = mul nsw i32 [[TMP15]], 1 13796 // CHECK10-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[MUL12]] 13797 // CHECK10-NEXT: store i32 [[SUB]], i32* [[I7]], align 4, !llvm.access.group !6 13798 // CHECK10-NEXT: [[TMP16:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !6 13799 // CHECK10-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6 13800 // CHECK10-NEXT: [[MUL13:%.*]] = mul nsw i32 [[TMP17]], 3 13801 // CHECK10-NEXT: [[CONV:%.*]] = sext i32 [[MUL13]] to i64 13802 // CHECK10-NEXT: [[ADD14:%.*]] = add nsw i64 [[TMP16]], [[CONV]] 13803 // CHECK10-NEXT: store i64 [[ADD14]], i64* [[K8]], align 8, !llvm.access.group !6 13804 // CHECK10-NEXT: [[TMP18:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !6 13805 // CHECK10-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP18]], 1 13806 // CHECK10-NEXT: store i32 [[ADD15]], i32* [[A]], align 4, !llvm.access.group !6 13807 // CHECK10-NEXT: br label [[OMP_BODY_CONTINUE16:%.*]] 13808 // CHECK10: omp.body.continue16: 13809 // CHECK10-NEXT: br label [[OMP_INNER_FOR_INC17:%.*]] 13810 // CHECK10: omp.inner.for.inc17: 13811 // CHECK10-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6 13812 // CHECK10-NEXT: [[ADD18:%.*]] = add nsw i32 [[TMP19]], 1 13813 // CHECK10-NEXT: store i32 [[ADD18]], i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6 13814 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP7:![0-9]+]] 13815 // CHECK10: omp.inner.for.end19: 13816 // CHECK10-NEXT: store i32 1, i32* [[I7]], align 4 13817 // CHECK10-NEXT: [[TMP20:%.*]] = load i64, i64* [[K8]], align 8 13818 // CHECK10-NEXT: store i64 [[TMP20]], i64* [[K]], align 8 13819 // CHECK10-NEXT: store i32 12, i32* [[LIN]], align 4 13820 // CHECK10-NEXT: store i64 0, i64* [[DOTOMP_LB21]], align 8 13821 // CHECK10-NEXT: store i64 3, i64* [[DOTOMP_UB22]], align 8 13822 // CHECK10-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTOMP_LB21]], align 8 13823 // CHECK10-NEXT: store i64 [[TMP21]], i64* [[DOTOMP_IV23]], align 8 13824 // CHECK10-NEXT: [[TMP22:%.*]] = load i32, i32* [[LIN]], align 4 13825 // CHECK10-NEXT: store i32 [[TMP22]], i32* [[DOTLINEAR_START24]], align 4 13826 // CHECK10-NEXT: [[TMP23:%.*]] = load i32, i32* [[A]], align 4 13827 // CHECK10-NEXT: store i32 [[TMP23]], i32* [[DOTLINEAR_START25]], align 4 13828 // CHECK10-NEXT: [[CALL26:%.*]] = call i64 @_Z7get_valv() 13829 // CHECK10-NEXT: store i64 [[CALL26]], i64* [[DOTLINEAR_STEP]], align 8 13830 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND29:%.*]] 13831 // CHECK10: omp.inner.for.cond29: 13832 // CHECK10-NEXT: [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9 13833 // CHECK10-NEXT: [[TMP25:%.*]] = load i64, i64* [[DOTOMP_UB22]], align 8, !llvm.access.group !9 13834 // CHECK10-NEXT: [[CMP30:%.*]] = 
icmp ule i64 [[TMP24]], [[TMP25]] 13835 // CHECK10-NEXT: br i1 [[CMP30]], label [[OMP_INNER_FOR_BODY31:%.*]], label [[OMP_INNER_FOR_END48:%.*]] 13836 // CHECK10: omp.inner.for.body31: 13837 // CHECK10-NEXT: [[TMP26:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9 13838 // CHECK10-NEXT: [[MUL32:%.*]] = mul i64 [[TMP26]], 400 13839 // CHECK10-NEXT: [[SUB33:%.*]] = sub i64 2000, [[MUL32]] 13840 // CHECK10-NEXT: store i64 [[SUB33]], i64* [[IT]], align 8, !llvm.access.group !9 13841 // CHECK10-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTLINEAR_START24]], align 4, !llvm.access.group !9 13842 // CHECK10-NEXT: [[CONV34:%.*]] = sext i32 [[TMP27]] to i64 13843 // CHECK10-NEXT: [[TMP28:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9 13844 // CHECK10-NEXT: [[TMP29:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !9 13845 // CHECK10-NEXT: [[MUL35:%.*]] = mul i64 [[TMP28]], [[TMP29]] 13846 // CHECK10-NEXT: [[ADD36:%.*]] = add i64 [[CONV34]], [[MUL35]] 13847 // CHECK10-NEXT: [[CONV37:%.*]] = trunc i64 [[ADD36]] to i32 13848 // CHECK10-NEXT: store i32 [[CONV37]], i32* [[LIN27]], align 4, !llvm.access.group !9 13849 // CHECK10-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTLINEAR_START25]], align 4, !llvm.access.group !9 13850 // CHECK10-NEXT: [[CONV38:%.*]] = sext i32 [[TMP30]] to i64 13851 // CHECK10-NEXT: [[TMP31:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9 13852 // CHECK10-NEXT: [[TMP32:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !9 13853 // CHECK10-NEXT: [[MUL39:%.*]] = mul i64 [[TMP31]], [[TMP32]] 13854 // CHECK10-NEXT: [[ADD40:%.*]] = add i64 [[CONV38]], [[MUL39]] 13855 // CHECK10-NEXT: [[CONV41:%.*]] = trunc i64 [[ADD40]] to i32 13856 // CHECK10-NEXT: store i32 [[CONV41]], i32* [[A28]], align 4, !llvm.access.group !9 13857 // CHECK10-NEXT: [[TMP33:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !9 13858 // CHECK10-NEXT: [[CONV42:%.*]] = sext i16 [[TMP33]] to i32 13859 // CHECK10-NEXT: [[ADD43:%.*]] = add nsw i32 [[CONV42]], 1 13860 // CHECK10-NEXT: [[CONV44:%.*]] = trunc i32 [[ADD43]] to i16 13861 // CHECK10-NEXT: store i16 [[CONV44]], i16* [[AA]], align 2, !llvm.access.group !9 13862 // CHECK10-NEXT: br label [[OMP_BODY_CONTINUE45:%.*]] 13863 // CHECK10: omp.body.continue45: 13864 // CHECK10-NEXT: br label [[OMP_INNER_FOR_INC46:%.*]] 13865 // CHECK10: omp.inner.for.inc46: 13866 // CHECK10-NEXT: [[TMP34:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9 13867 // CHECK10-NEXT: [[ADD47:%.*]] = add i64 [[TMP34]], 1 13868 // CHECK10-NEXT: store i64 [[ADD47]], i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9 13869 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP10:![0-9]+]] 13870 // CHECK10: omp.inner.for.end48: 13871 // CHECK10-NEXT: store i64 400, i64* [[IT]], align 8 13872 // CHECK10-NEXT: [[TMP35:%.*]] = load i32, i32* [[LIN27]], align 4 13873 // CHECK10-NEXT: store i32 [[TMP35]], i32* [[LIN]], align 4 13874 // CHECK10-NEXT: [[TMP36:%.*]] = load i32, i32* [[A28]], align 4 13875 // CHECK10-NEXT: store i32 [[TMP36]], i32* [[A]], align 4 13876 // CHECK10-NEXT: store i32 0, i32* [[DOTOMP_LB50]], align 4 13877 // CHECK10-NEXT: store i32 3, i32* [[DOTOMP_UB51]], align 4 13878 // CHECK10-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTOMP_LB50]], align 4 13879 // CHECK10-NEXT: store i32 [[TMP37]], i32* [[DOTOMP_IV52]], align 4 13880 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND54:%.*]] 13881 // CHECK10: omp.inner.for.cond54: 13882 // 
CHECK10-NEXT: [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12 13883 // CHECK10-NEXT: [[TMP39:%.*]] = load i32, i32* [[DOTOMP_UB51]], align 4, !llvm.access.group !12 13884 // CHECK10-NEXT: [[CMP55:%.*]] = icmp sle i32 [[TMP38]], [[TMP39]] 13885 // CHECK10-NEXT: br i1 [[CMP55]], label [[OMP_INNER_FOR_BODY56:%.*]], label [[OMP_INNER_FOR_END67:%.*]] 13886 // CHECK10: omp.inner.for.body56: 13887 // CHECK10-NEXT: [[TMP40:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12 13888 // CHECK10-NEXT: [[MUL57:%.*]] = mul nsw i32 [[TMP40]], 4 13889 // CHECK10-NEXT: [[ADD58:%.*]] = add nsw i32 6, [[MUL57]] 13890 // CHECK10-NEXT: [[CONV59:%.*]] = trunc i32 [[ADD58]] to i16 13891 // CHECK10-NEXT: store i16 [[CONV59]], i16* [[IT53]], align 2, !llvm.access.group !12 13892 // CHECK10-NEXT: [[TMP41:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !12 13893 // CHECK10-NEXT: [[ADD60:%.*]] = add nsw i32 [[TMP41]], 1 13894 // CHECK10-NEXT: store i32 [[ADD60]], i32* [[A]], align 4, !llvm.access.group !12 13895 // CHECK10-NEXT: [[TMP42:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !12 13896 // CHECK10-NEXT: [[CONV61:%.*]] = sext i16 [[TMP42]] to i32 13897 // CHECK10-NEXT: [[ADD62:%.*]] = add nsw i32 [[CONV61]], 1 13898 // CHECK10-NEXT: [[CONV63:%.*]] = trunc i32 [[ADD62]] to i16 13899 // CHECK10-NEXT: store i16 [[CONV63]], i16* [[AA]], align 2, !llvm.access.group !12 13900 // CHECK10-NEXT: br label [[OMP_BODY_CONTINUE64:%.*]] 13901 // CHECK10: omp.body.continue64: 13902 // CHECK10-NEXT: br label [[OMP_INNER_FOR_INC65:%.*]] 13903 // CHECK10: omp.inner.for.inc65: 13904 // CHECK10-NEXT: [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12 13905 // CHECK10-NEXT: [[ADD66:%.*]] = add nsw i32 [[TMP43]], 1 13906 // CHECK10-NEXT: store i32 [[ADD66]], i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12 13907 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND54]], !llvm.loop [[LOOP13:![0-9]+]] 13908 // CHECK10: omp.inner.for.end67: 13909 // CHECK10-NEXT: store i16 22, i16* [[IT53]], align 2 13910 // CHECK10-NEXT: [[TMP44:%.*]] = load i32, i32* [[A]], align 4 13911 // CHECK10-NEXT: store i32 [[TMP44]], i32* [[DOTCAPTURE_EXPR_]], align 4 13912 // CHECK10-NEXT: store i32 0, i32* [[DOTOMP_LB69]], align 4 13913 // CHECK10-NEXT: store i32 25, i32* [[DOTOMP_UB70]], align 4 13914 // CHECK10-NEXT: [[TMP45:%.*]] = load i32, i32* [[DOTOMP_LB69]], align 4 13915 // CHECK10-NEXT: store i32 [[TMP45]], i32* [[DOTOMP_IV71]], align 4 13916 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND73:%.*]] 13917 // CHECK10: omp.inner.for.cond73: 13918 // CHECK10-NEXT: [[TMP46:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15 13919 // CHECK10-NEXT: [[TMP47:%.*]] = load i32, i32* [[DOTOMP_UB70]], align 4, !llvm.access.group !15 13920 // CHECK10-NEXT: [[CMP74:%.*]] = icmp sle i32 [[TMP46]], [[TMP47]] 13921 // CHECK10-NEXT: br i1 [[CMP74]], label [[OMP_INNER_FOR_BODY75:%.*]], label [[OMP_INNER_FOR_END100:%.*]] 13922 // CHECK10: omp.inner.for.body75: 13923 // CHECK10-NEXT: [[TMP48:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15 13924 // CHECK10-NEXT: [[MUL76:%.*]] = mul nsw i32 [[TMP48]], 1 13925 // CHECK10-NEXT: [[SUB77:%.*]] = sub nsw i32 122, [[MUL76]] 13926 // CHECK10-NEXT: [[CONV78:%.*]] = trunc i32 [[SUB77]] to i8 13927 // CHECK10-NEXT: store i8 [[CONV78]], i8* [[IT72]], align 1, !llvm.access.group !15 13928 // CHECK10-NEXT: [[TMP49:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !15 13929 // 
CHECK10-NEXT: [[ADD79:%.*]] = add nsw i32 [[TMP49]], 1 13930 // CHECK10-NEXT: store i32 [[ADD79]], i32* [[A]], align 4, !llvm.access.group !15 13931 // CHECK10-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i64 0, i64 2 13932 // CHECK10-NEXT: [[TMP50:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !15 13933 // CHECK10-NEXT: [[CONV80:%.*]] = fpext float [[TMP50]] to double 13934 // CHECK10-NEXT: [[ADD81:%.*]] = fadd double [[CONV80]], 1.000000e+00 13935 // CHECK10-NEXT: [[CONV82:%.*]] = fptrunc double [[ADD81]] to float 13936 // CHECK10-NEXT: store float [[CONV82]], float* [[ARRAYIDX]], align 4, !llvm.access.group !15 13937 // CHECK10-NEXT: [[ARRAYIDX83:%.*]] = getelementptr inbounds float, float* [[VLA]], i64 3 13938 // CHECK10-NEXT: [[TMP51:%.*]] = load float, float* [[ARRAYIDX83]], align 4, !llvm.access.group !15 13939 // CHECK10-NEXT: [[CONV84:%.*]] = fpext float [[TMP51]] to double 13940 // CHECK10-NEXT: [[ADD85:%.*]] = fadd double [[CONV84]], 1.000000e+00 13941 // CHECK10-NEXT: [[CONV86:%.*]] = fptrunc double [[ADD85]] to float 13942 // CHECK10-NEXT: store float [[CONV86]], float* [[ARRAYIDX83]], align 4, !llvm.access.group !15 13943 // CHECK10-NEXT: [[ARRAYIDX87:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i64 0, i64 1 13944 // CHECK10-NEXT: [[ARRAYIDX88:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX87]], i64 0, i64 2 13945 // CHECK10-NEXT: [[TMP52:%.*]] = load double, double* [[ARRAYIDX88]], align 8, !llvm.access.group !15 13946 // CHECK10-NEXT: [[ADD89:%.*]] = fadd double [[TMP52]], 1.000000e+00 13947 // CHECK10-NEXT: store double [[ADD89]], double* [[ARRAYIDX88]], align 8, !llvm.access.group !15 13948 // CHECK10-NEXT: [[TMP53:%.*]] = mul nsw i64 1, [[TMP4]] 13949 // CHECK10-NEXT: [[ARRAYIDX90:%.*]] = getelementptr inbounds double, double* [[VLA1]], i64 [[TMP53]] 13950 // CHECK10-NEXT: [[ARRAYIDX91:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX90]], i64 3 13951 // CHECK10-NEXT: [[TMP54:%.*]] = load double, double* [[ARRAYIDX91]], align 8, !llvm.access.group !15 13952 // CHECK10-NEXT: [[ADD92:%.*]] = fadd double [[TMP54]], 1.000000e+00 13953 // CHECK10-NEXT: store double [[ADD92]], double* [[ARRAYIDX91]], align 8, !llvm.access.group !15 13954 // CHECK10-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0 13955 // CHECK10-NEXT: [[TMP55:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group !15 13956 // CHECK10-NEXT: [[ADD93:%.*]] = add nsw i64 [[TMP55]], 1 13957 // CHECK10-NEXT: store i64 [[ADD93]], i64* [[X]], align 8, !llvm.access.group !15 13958 // CHECK10-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1 13959 // CHECK10-NEXT: [[TMP56:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group !15 13960 // CHECK10-NEXT: [[CONV94:%.*]] = sext i8 [[TMP56]] to i32 13961 // CHECK10-NEXT: [[ADD95:%.*]] = add nsw i32 [[CONV94]], 1 13962 // CHECK10-NEXT: [[CONV96:%.*]] = trunc i32 [[ADD95]] to i8 13963 // CHECK10-NEXT: store i8 [[CONV96]], i8* [[Y]], align 8, !llvm.access.group !15 13964 // CHECK10-NEXT: br label [[OMP_BODY_CONTINUE97:%.*]] 13965 // CHECK10: omp.body.continue97: 13966 // CHECK10-NEXT: br label [[OMP_INNER_FOR_INC98:%.*]] 13967 // CHECK10: omp.inner.for.inc98: 13968 // CHECK10-NEXT: [[TMP57:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15 13969 // CHECK10-NEXT: [[ADD99:%.*]] = add nsw i32 [[TMP57]], 1 13970 // CHECK10-NEXT: store i32 [[ADD99]], i32* 
[[DOTOMP_IV71]], align 4, !llvm.access.group !15 13971 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND73]], !llvm.loop [[LOOP16:![0-9]+]] 13972 // CHECK10: omp.inner.for.end100: 13973 // CHECK10-NEXT: store i8 96, i8* [[IT72]], align 1 13974 // CHECK10-NEXT: [[TMP58:%.*]] = load i32, i32* [[A]], align 4 13975 // CHECK10-NEXT: [[TMP59:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 13976 // CHECK10-NEXT: call void @llvm.stackrestore(i8* [[TMP59]]) 13977 // CHECK10-NEXT: ret i32 [[TMP58]] 13978 // 13979 // 13980 // CHECK10-LABEL: define {{[^@]+}}@_Z3bari 13981 // CHECK10-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] { 13982 // CHECK10-NEXT: entry: 13983 // CHECK10-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 13984 // CHECK10-NEXT: [[A:%.*]] = alloca i32, align 4 13985 // CHECK10-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8 13986 // CHECK10-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 13987 // CHECK10-NEXT: store i32 0, i32* [[A]], align 4 13988 // CHECK10-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 13989 // CHECK10-NEXT: [[CALL:%.*]] = call signext i32 @_Z3fooi(i32 signext [[TMP0]]) 13990 // CHECK10-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4 13991 // CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] 13992 // CHECK10-NEXT: store i32 [[ADD]], i32* [[A]], align 4 13993 // CHECK10-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 13994 // CHECK10-NEXT: [[CALL1:%.*]] = call signext i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 8 dereferenceable(8) [[S]], i32 signext [[TMP2]]) 13995 // CHECK10-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 13996 // CHECK10-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] 13997 // CHECK10-NEXT: store i32 [[ADD2]], i32* [[A]], align 4 13998 // CHECK10-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 13999 // CHECK10-NEXT: [[CALL3:%.*]] = call signext i32 @_ZL7fstatici(i32 signext [[TMP4]]) 14000 // CHECK10-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4 14001 // CHECK10-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] 14002 // CHECK10-NEXT: store i32 [[ADD4]], i32* [[A]], align 4 14003 // CHECK10-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4 14004 // CHECK10-NEXT: [[CALL5:%.*]] = call signext i32 @_Z9ftemplateIiET_i(i32 signext [[TMP6]]) 14005 // CHECK10-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4 14006 // CHECK10-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]] 14007 // CHECK10-NEXT: store i32 [[ADD6]], i32* [[A]], align 4 14008 // CHECK10-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4 14009 // CHECK10-NEXT: ret i32 [[TMP8]] 14010 // 14011 // 14012 // CHECK10-LABEL: define {{[^@]+}}@_ZN2S12r1Ei 14013 // CHECK10-SAME: (%struct.S1* nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 signext [[N:%.*]]) #[[ATTR0]] comdat align 2 { 14014 // CHECK10-NEXT: entry: 14015 // CHECK10-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 14016 // CHECK10-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 14017 // CHECK10-NEXT: [[B:%.*]] = alloca i32, align 4 14018 // CHECK10-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8 14019 // CHECK10-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8 14020 // CHECK10-NEXT: [[TMP:%.*]] = alloca i64, align 8 14021 // CHECK10-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 14022 // CHECK10-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 14023 // CHECK10-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 14024 // CHECK10-NEXT: [[IT:%.*]] = alloca i64, align 8 14025 // CHECK10-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 14026 // CHECK10-NEXT: store i32 
[[N]], i32* [[N_ADDR]], align 4 14027 // CHECK10-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 14028 // CHECK10-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 14029 // CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 14030 // CHECK10-NEXT: store i32 [[ADD]], i32* [[B]], align 4 14031 // CHECK10-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 14032 // CHECK10-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 14033 // CHECK10-NEXT: [[TMP3:%.*]] = call i8* @llvm.stacksave() 14034 // CHECK10-NEXT: store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8 14035 // CHECK10-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]] 14036 // CHECK10-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2 14037 // CHECK10-NEXT: store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8 14038 // CHECK10-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 14039 // CHECK10-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8 14040 // CHECK10-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 14041 // CHECK10-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8 14042 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 14043 // CHECK10: omp.inner.for.cond: 14044 // CHECK10-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18 14045 // CHECK10-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !18 14046 // CHECK10-NEXT: [[CMP:%.*]] = icmp ule i64 [[TMP6]], [[TMP7]] 14047 // CHECK10-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 14048 // CHECK10: omp.inner.for.body: 14049 // CHECK10-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18 14050 // CHECK10-NEXT: [[MUL:%.*]] = mul i64 [[TMP8]], 400 14051 // CHECK10-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] 14052 // CHECK10-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !18 14053 // CHECK10-NEXT: [[TMP9:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group !18 14054 // CHECK10-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP9]] to double 14055 // CHECK10-NEXT: [[ADD2:%.*]] = fadd double [[CONV]], 1.500000e+00 14056 // CHECK10-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0 14057 // CHECK10-NEXT: store double [[ADD2]], double* [[A]], align 8, !llvm.access.group !18 14058 // CHECK10-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 14059 // CHECK10-NEXT: [[TMP10:%.*]] = load double, double* [[A3]], align 8, !llvm.access.group !18 14060 // CHECK10-NEXT: [[INC:%.*]] = fadd double [[TMP10]], 1.000000e+00 14061 // CHECK10-NEXT: store double [[INC]], double* [[A3]], align 8, !llvm.access.group !18 14062 // CHECK10-NEXT: [[CONV4:%.*]] = fptosi double [[INC]] to i16 14063 // CHECK10-NEXT: [[TMP11:%.*]] = mul nsw i64 1, [[TMP2]] 14064 // CHECK10-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP11]] 14065 // CHECK10-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1 14066 // CHECK10-NEXT: store i16 [[CONV4]], i16* [[ARRAYIDX5]], align 2, !llvm.access.group !18 14067 // CHECK10-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 14068 // CHECK10: omp.body.continue: 14069 // CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 14070 // CHECK10: omp.inner.for.inc: 14071 // CHECK10-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18 14072 // CHECK10-NEXT: [[ADD6:%.*]] = add i64 [[TMP12]], 1 14073 // CHECK10-NEXT: store i64 [[ADD6]], i64* [[DOTOMP_IV]], align 8, 
!llvm.access.group !18 14074 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]] 14075 // CHECK10: omp.inner.for.end: 14076 // CHECK10-NEXT: store i64 400, i64* [[IT]], align 8 14077 // CHECK10-NEXT: [[TMP13:%.*]] = mul nsw i64 1, [[TMP2]] 14078 // CHECK10-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP13]] 14079 // CHECK10-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX7]], i64 1 14080 // CHECK10-NEXT: [[TMP14:%.*]] = load i16, i16* [[ARRAYIDX8]], align 2 14081 // CHECK10-NEXT: [[CONV9:%.*]] = sext i16 [[TMP14]] to i32 14082 // CHECK10-NEXT: [[TMP15:%.*]] = load i32, i32* [[B]], align 4 14083 // CHECK10-NEXT: [[ADD10:%.*]] = add nsw i32 [[CONV9]], [[TMP15]] 14084 // CHECK10-NEXT: [[TMP16:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 14085 // CHECK10-NEXT: call void @llvm.stackrestore(i8* [[TMP16]]) 14086 // CHECK10-NEXT: ret i32 [[ADD10]] 14087 // 14088 // 14089 // CHECK10-LABEL: define {{[^@]+}}@_ZL7fstatici 14090 // CHECK10-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] { 14091 // CHECK10-NEXT: entry: 14092 // CHECK10-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 14093 // CHECK10-NEXT: [[A:%.*]] = alloca i32, align 4 14094 // CHECK10-NEXT: [[AA:%.*]] = alloca i16, align 2 14095 // CHECK10-NEXT: [[AAA:%.*]] = alloca i8, align 1 14096 // CHECK10-NEXT: [[B:%.*]] = alloca [10 x i32], align 4 14097 // CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4 14098 // CHECK10-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 14099 // CHECK10-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 14100 // CHECK10-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 14101 // CHECK10-NEXT: store i32 0, i32* [[A]], align 4 14102 // CHECK10-NEXT: store i16 0, i16* [[AA]], align 2 14103 // CHECK10-NEXT: store i8 0, i8* [[AAA]], align 1 14104 // CHECK10-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 14105 // CHECK10-NEXT: store i32 429496720, i32* [[DOTOMP_UB]], align 4 14106 // CHECK10-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4 14107 // CHECK10-NEXT: ret i32 [[TMP0]] 14108 // 14109 // 14110 // CHECK10-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i 14111 // CHECK10-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] comdat { 14112 // CHECK10-NEXT: entry: 14113 // CHECK10-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 14114 // CHECK10-NEXT: [[A:%.*]] = alloca i32, align 4 14115 // CHECK10-NEXT: [[AA:%.*]] = alloca i16, align 2 14116 // CHECK10-NEXT: [[B:%.*]] = alloca [10 x i32], align 4 14117 // CHECK10-NEXT: [[TMP:%.*]] = alloca i64, align 8 14118 // CHECK10-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 14119 // CHECK10-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 14120 // CHECK10-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 14121 // CHECK10-NEXT: [[I:%.*]] = alloca i64, align 8 14122 // CHECK10-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 14123 // CHECK10-NEXT: store i32 0, i32* [[A]], align 4 14124 // CHECK10-NEXT: store i16 0, i16* [[AA]], align 2 14125 // CHECK10-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 14126 // CHECK10-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8 14127 // CHECK10-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 14128 // CHECK10-NEXT: store i64 [[TMP0]], i64* [[DOTOMP_IV]], align 8 14129 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 14130 // CHECK10: omp.inner.for.cond: 14131 // CHECK10-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !21 14132 // CHECK10-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !21 14133 // CHECK10-NEXT: [[CMP:%.*]] = icmp sle i64 
[[TMP1]], [[TMP2]] 14134 // CHECK10-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 14135 // CHECK10: omp.inner.for.body: 14136 // CHECK10-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !21 14137 // CHECK10-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP3]], 3 14138 // CHECK10-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]] 14139 // CHECK10-NEXT: store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !21 14140 // CHECK10-NEXT: [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !21 14141 // CHECK10-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 14142 // CHECK10-NEXT: store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group !21 14143 // CHECK10-NEXT: [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !21 14144 // CHECK10-NEXT: [[CONV:%.*]] = sext i16 [[TMP5]] to i32 14145 // CHECK10-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1 14146 // CHECK10-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16 14147 // CHECK10-NEXT: store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group !21 14148 // CHECK10-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i64 0, i64 2 14149 // CHECK10-NEXT: [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !21 14150 // CHECK10-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1 14151 // CHECK10-NEXT: store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !21 14152 // CHECK10-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 14153 // CHECK10: omp.body.continue: 14154 // CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 14155 // CHECK10: omp.inner.for.inc: 14156 // CHECK10-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !21 14157 // CHECK10-NEXT: [[ADD5:%.*]] = add nsw i64 [[TMP7]], 1 14158 // CHECK10-NEXT: store i64 [[ADD5]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !21 14159 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]] 14160 // CHECK10: omp.inner.for.end: 14161 // CHECK10-NEXT: store i64 11, i64* [[I]], align 8 14162 // CHECK10-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4 14163 // CHECK10-NEXT: ret i32 [[TMP8]] 14164 // 14165 // 14166 // CHECK11-LABEL: define {{[^@]+}}@_Z7get_valv 14167 // CHECK11-SAME: () #[[ATTR0:[0-9]+]] { 14168 // CHECK11-NEXT: entry: 14169 // CHECK11-NEXT: ret i64 0 14170 // 14171 // 14172 // CHECK11-LABEL: define {{[^@]+}}@_Z3fooi 14173 // CHECK11-SAME: (i32 [[N:%.*]]) #[[ATTR0]] { 14174 // CHECK11-NEXT: entry: 14175 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 14176 // CHECK11-NEXT: [[A:%.*]] = alloca i32, align 4 14177 // CHECK11-NEXT: [[AA:%.*]] = alloca i16, align 2 14178 // CHECK11-NEXT: [[B:%.*]] = alloca [10 x float], align 4 14179 // CHECK11-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4 14180 // CHECK11-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4 14181 // CHECK11-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8 14182 // CHECK11-NEXT: [[__VLA_EXPR1:%.*]] = alloca i32, align 4 14183 // CHECK11-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4 14184 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 14185 // CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 14186 // CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 14187 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 14188 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 14189 // CHECK11-NEXT: [[K:%.*]] = alloca i64, align 8 14190 // CHECK11-NEXT: [[_TMP3:%.*]] = alloca i32, align 4 14191 // CHECK11-NEXT: [[DOTOMP_LB4:%.*]] = alloca i32, 
align 4 14192 // CHECK11-NEXT: [[DOTOMP_UB5:%.*]] = alloca i32, align 4 14193 // CHECK11-NEXT: [[DOTOMP_IV6:%.*]] = alloca i32, align 4 14194 // CHECK11-NEXT: [[DOTLINEAR_START:%.*]] = alloca i64, align 8 14195 // CHECK11-NEXT: [[I7:%.*]] = alloca i32, align 4 14196 // CHECK11-NEXT: [[K8:%.*]] = alloca i64, align 8 14197 // CHECK11-NEXT: [[LIN:%.*]] = alloca i32, align 4 14198 // CHECK11-NEXT: [[_TMP20:%.*]] = alloca i64, align 4 14199 // CHECK11-NEXT: [[DOTOMP_LB21:%.*]] = alloca i64, align 8 14200 // CHECK11-NEXT: [[DOTOMP_UB22:%.*]] = alloca i64, align 8 14201 // CHECK11-NEXT: [[DOTOMP_IV23:%.*]] = alloca i64, align 8 14202 // CHECK11-NEXT: [[DOTLINEAR_START24:%.*]] = alloca i32, align 4 14203 // CHECK11-NEXT: [[DOTLINEAR_START25:%.*]] = alloca i32, align 4 14204 // CHECK11-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8 14205 // CHECK11-NEXT: [[IT:%.*]] = alloca i64, align 8 14206 // CHECK11-NEXT: [[LIN27:%.*]] = alloca i32, align 4 14207 // CHECK11-NEXT: [[A28:%.*]] = alloca i32, align 4 14208 // CHECK11-NEXT: [[_TMP49:%.*]] = alloca i16, align 2 14209 // CHECK11-NEXT: [[DOTOMP_LB50:%.*]] = alloca i32, align 4 14210 // CHECK11-NEXT: [[DOTOMP_UB51:%.*]] = alloca i32, align 4 14211 // CHECK11-NEXT: [[DOTOMP_IV52:%.*]] = alloca i32, align 4 14212 // CHECK11-NEXT: [[IT53:%.*]] = alloca i16, align 2 14213 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 14214 // CHECK11-NEXT: [[_TMP68:%.*]] = alloca i8, align 1 14215 // CHECK11-NEXT: [[DOTOMP_LB69:%.*]] = alloca i32, align 4 14216 // CHECK11-NEXT: [[DOTOMP_UB70:%.*]] = alloca i32, align 4 14217 // CHECK11-NEXT: [[DOTOMP_IV71:%.*]] = alloca i32, align 4 14218 // CHECK11-NEXT: [[IT72:%.*]] = alloca i8, align 1 14219 // CHECK11-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 14220 // CHECK11-NEXT: store i32 0, i32* [[A]], align 4 14221 // CHECK11-NEXT: store i16 0, i16* [[AA]], align 2 14222 // CHECK11-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 14223 // CHECK11-NEXT: [[TMP1:%.*]] = call i8* @llvm.stacksave() 14224 // CHECK11-NEXT: store i8* [[TMP1]], i8** [[SAVED_STACK]], align 4 14225 // CHECK11-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP0]], align 4 14226 // CHECK11-NEXT: store i32 [[TMP0]], i32* [[__VLA_EXPR0]], align 4 14227 // CHECK11-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 14228 // CHECK11-NEXT: [[TMP3:%.*]] = mul nuw i32 5, [[TMP2]] 14229 // CHECK11-NEXT: [[VLA1:%.*]] = alloca double, i32 [[TMP3]], align 8 14230 // CHECK11-NEXT: store i32 [[TMP2]], i32* [[__VLA_EXPR1]], align 4 14231 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 14232 // CHECK11-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4 14233 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 14234 // CHECK11-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 14235 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 14236 // CHECK11: omp.inner.for.cond: 14237 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3 14238 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !3 14239 // CHECK11-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 14240 // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 14241 // CHECK11: omp.inner.for.body: 14242 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3 14243 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5 14244 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]] 14245 // CHECK11-NEXT: store 
i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !3 14246 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 14247 // CHECK11: omp.body.continue: 14248 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 14249 // CHECK11: omp.inner.for.inc: 14250 // CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3 14251 // CHECK11-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 14252 // CHECK11-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3 14253 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] 14254 // CHECK11: omp.inner.for.end: 14255 // CHECK11-NEXT: store i32 33, i32* [[I]], align 4 14256 // CHECK11-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv() 14257 // CHECK11-NEXT: store i64 [[CALL]], i64* [[K]], align 8 14258 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_LB4]], align 4 14259 // CHECK11-NEXT: store i32 8, i32* [[DOTOMP_UB5]], align 4 14260 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB4]], align 4 14261 // CHECK11-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_IV6]], align 4 14262 // CHECK11-NEXT: [[TMP10:%.*]] = load i64, i64* [[K]], align 8 14263 // CHECK11-NEXT: store i64 [[TMP10]], i64* [[DOTLINEAR_START]], align 8 14264 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND9:%.*]] 14265 // CHECK11: omp.inner.for.cond9: 14266 // CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7 14267 // CHECK11-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB5]], align 4, !llvm.access.group !7 14268 // CHECK11-NEXT: [[CMP10:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]] 14269 // CHECK11-NEXT: br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END19:%.*]] 14270 // CHECK11: omp.inner.for.body11: 14271 // CHECK11-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7 14272 // CHECK11-NEXT: [[MUL12:%.*]] = mul nsw i32 [[TMP13]], 1 14273 // CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[MUL12]] 14274 // CHECK11-NEXT: store i32 [[SUB]], i32* [[I7]], align 4, !llvm.access.group !7 14275 // CHECK11-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !7 14276 // CHECK11-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7 14277 // CHECK11-NEXT: [[MUL13:%.*]] = mul nsw i32 [[TMP15]], 3 14278 // CHECK11-NEXT: [[CONV:%.*]] = sext i32 [[MUL13]] to i64 14279 // CHECK11-NEXT: [[ADD14:%.*]] = add nsw i64 [[TMP14]], [[CONV]] 14280 // CHECK11-NEXT: store i64 [[ADD14]], i64* [[K8]], align 8, !llvm.access.group !7 14281 // CHECK11-NEXT: [[TMP16:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !7 14282 // CHECK11-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP16]], 1 14283 // CHECK11-NEXT: store i32 [[ADD15]], i32* [[A]], align 4, !llvm.access.group !7 14284 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE16:%.*]] 14285 // CHECK11: omp.body.continue16: 14286 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC17:%.*]] 14287 // CHECK11: omp.inner.for.inc17: 14288 // CHECK11-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7 14289 // CHECK11-NEXT: [[ADD18:%.*]] = add nsw i32 [[TMP17]], 1 14290 // CHECK11-NEXT: store i32 [[ADD18]], i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7 14291 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP8:![0-9]+]] 14292 // CHECK11: omp.inner.for.end19: 14293 // CHECK11-NEXT: store i32 1, i32* [[I7]], align 4 14294 // CHECK11-NEXT: [[TMP18:%.*]] = load i64, i64* [[K8]], align 8 14295 // CHECK11-NEXT: store i64 
[[TMP18]], i64* [[K]], align 8 14296 // CHECK11-NEXT: store i32 12, i32* [[LIN]], align 4 14297 // CHECK11-NEXT: store i64 0, i64* [[DOTOMP_LB21]], align 8 14298 // CHECK11-NEXT: store i64 3, i64* [[DOTOMP_UB22]], align 8 14299 // CHECK11-NEXT: [[TMP19:%.*]] = load i64, i64* [[DOTOMP_LB21]], align 8 14300 // CHECK11-NEXT: store i64 [[TMP19]], i64* [[DOTOMP_IV23]], align 8 14301 // CHECK11-NEXT: [[TMP20:%.*]] = load i32, i32* [[LIN]], align 4 14302 // CHECK11-NEXT: store i32 [[TMP20]], i32* [[DOTLINEAR_START24]], align 4 14303 // CHECK11-NEXT: [[TMP21:%.*]] = load i32, i32* [[A]], align 4 14304 // CHECK11-NEXT: store i32 [[TMP21]], i32* [[DOTLINEAR_START25]], align 4 14305 // CHECK11-NEXT: [[CALL26:%.*]] = call i64 @_Z7get_valv() 14306 // CHECK11-NEXT: store i64 [[CALL26]], i64* [[DOTLINEAR_STEP]], align 8 14307 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND29:%.*]] 14308 // CHECK11: omp.inner.for.cond29: 14309 // CHECK11-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10 14310 // CHECK11-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTOMP_UB22]], align 8, !llvm.access.group !10 14311 // CHECK11-NEXT: [[CMP30:%.*]] = icmp ule i64 [[TMP22]], [[TMP23]] 14312 // CHECK11-NEXT: br i1 [[CMP30]], label [[OMP_INNER_FOR_BODY31:%.*]], label [[OMP_INNER_FOR_END48:%.*]] 14313 // CHECK11: omp.inner.for.body31: 14314 // CHECK11-NEXT: [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10 14315 // CHECK11-NEXT: [[MUL32:%.*]] = mul i64 [[TMP24]], 400 14316 // CHECK11-NEXT: [[SUB33:%.*]] = sub i64 2000, [[MUL32]] 14317 // CHECK11-NEXT: store i64 [[SUB33]], i64* [[IT]], align 8, !llvm.access.group !10 14318 // CHECK11-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTLINEAR_START24]], align 4, !llvm.access.group !10 14319 // CHECK11-NEXT: [[CONV34:%.*]] = sext i32 [[TMP25]] to i64 14320 // CHECK11-NEXT: [[TMP26:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10 14321 // CHECK11-NEXT: [[TMP27:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !10 14322 // CHECK11-NEXT: [[MUL35:%.*]] = mul i64 [[TMP26]], [[TMP27]] 14323 // CHECK11-NEXT: [[ADD36:%.*]] = add i64 [[CONV34]], [[MUL35]] 14324 // CHECK11-NEXT: [[CONV37:%.*]] = trunc i64 [[ADD36]] to i32 14325 // CHECK11-NEXT: store i32 [[CONV37]], i32* [[LIN27]], align 4, !llvm.access.group !10 14326 // CHECK11-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTLINEAR_START25]], align 4, !llvm.access.group !10 14327 // CHECK11-NEXT: [[CONV38:%.*]] = sext i32 [[TMP28]] to i64 14328 // CHECK11-NEXT: [[TMP29:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10 14329 // CHECK11-NEXT: [[TMP30:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !10 14330 // CHECK11-NEXT: [[MUL39:%.*]] = mul i64 [[TMP29]], [[TMP30]] 14331 // CHECK11-NEXT: [[ADD40:%.*]] = add i64 [[CONV38]], [[MUL39]] 14332 // CHECK11-NEXT: [[CONV41:%.*]] = trunc i64 [[ADD40]] to i32 14333 // CHECK11-NEXT: store i32 [[CONV41]], i32* [[A28]], align 4, !llvm.access.group !10 14334 // CHECK11-NEXT: [[TMP31:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !10 14335 // CHECK11-NEXT: [[CONV42:%.*]] = sext i16 [[TMP31]] to i32 14336 // CHECK11-NEXT: [[ADD43:%.*]] = add nsw i32 [[CONV42]], 1 14337 // CHECK11-NEXT: [[CONV44:%.*]] = trunc i32 [[ADD43]] to i16 14338 // CHECK11-NEXT: store i16 [[CONV44]], i16* [[AA]], align 2, !llvm.access.group !10 14339 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE45:%.*]] 14340 // CHECK11: omp.body.continue45: 14341 // CHECK11-NEXT: br label 
[[OMP_INNER_FOR_INC46:%.*]] 14342 // CHECK11: omp.inner.for.inc46: 14343 // CHECK11-NEXT: [[TMP32:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10 14344 // CHECK11-NEXT: [[ADD47:%.*]] = add i64 [[TMP32]], 1 14345 // CHECK11-NEXT: store i64 [[ADD47]], i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10 14346 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP11:![0-9]+]] 14347 // CHECK11: omp.inner.for.end48: 14348 // CHECK11-NEXT: store i64 400, i64* [[IT]], align 8 14349 // CHECK11-NEXT: [[TMP33:%.*]] = load i32, i32* [[LIN27]], align 4 14350 // CHECK11-NEXT: store i32 [[TMP33]], i32* [[LIN]], align 4 14351 // CHECK11-NEXT: [[TMP34:%.*]] = load i32, i32* [[A28]], align 4 14352 // CHECK11-NEXT: store i32 [[TMP34]], i32* [[A]], align 4 14353 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_LB50]], align 4 14354 // CHECK11-NEXT: store i32 3, i32* [[DOTOMP_UB51]], align 4 14355 // CHECK11-NEXT: [[TMP35:%.*]] = load i32, i32* [[DOTOMP_LB50]], align 4 14356 // CHECK11-NEXT: store i32 [[TMP35]], i32* [[DOTOMP_IV52]], align 4 14357 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND54:%.*]] 14358 // CHECK11: omp.inner.for.cond54: 14359 // CHECK11-NEXT: [[TMP36:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13 14360 // CHECK11-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTOMP_UB51]], align 4, !llvm.access.group !13 14361 // CHECK11-NEXT: [[CMP55:%.*]] = icmp sle i32 [[TMP36]], [[TMP37]] 14362 // CHECK11-NEXT: br i1 [[CMP55]], label [[OMP_INNER_FOR_BODY56:%.*]], label [[OMP_INNER_FOR_END67:%.*]] 14363 // CHECK11: omp.inner.for.body56: 14364 // CHECK11-NEXT: [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13 14365 // CHECK11-NEXT: [[MUL57:%.*]] = mul nsw i32 [[TMP38]], 4 14366 // CHECK11-NEXT: [[ADD58:%.*]] = add nsw i32 6, [[MUL57]] 14367 // CHECK11-NEXT: [[CONV59:%.*]] = trunc i32 [[ADD58]] to i16 14368 // CHECK11-NEXT: store i16 [[CONV59]], i16* [[IT53]], align 2, !llvm.access.group !13 14369 // CHECK11-NEXT: [[TMP39:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !13 14370 // CHECK11-NEXT: [[ADD60:%.*]] = add nsw i32 [[TMP39]], 1 14371 // CHECK11-NEXT: store i32 [[ADD60]], i32* [[A]], align 4, !llvm.access.group !13 14372 // CHECK11-NEXT: [[TMP40:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !13 14373 // CHECK11-NEXT: [[CONV61:%.*]] = sext i16 [[TMP40]] to i32 14374 // CHECK11-NEXT: [[ADD62:%.*]] = add nsw i32 [[CONV61]], 1 14375 // CHECK11-NEXT: [[CONV63:%.*]] = trunc i32 [[ADD62]] to i16 14376 // CHECK11-NEXT: store i16 [[CONV63]], i16* [[AA]], align 2, !llvm.access.group !13 14377 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE64:%.*]] 14378 // CHECK11: omp.body.continue64: 14379 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC65:%.*]] 14380 // CHECK11: omp.inner.for.inc65: 14381 // CHECK11-NEXT: [[TMP41:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13 14382 // CHECK11-NEXT: [[ADD66:%.*]] = add nsw i32 [[TMP41]], 1 14383 // CHECK11-NEXT: store i32 [[ADD66]], i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13 14384 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND54]], !llvm.loop [[LOOP14:![0-9]+]] 14385 // CHECK11: omp.inner.for.end67: 14386 // CHECK11-NEXT: store i16 22, i16* [[IT53]], align 2 14387 // CHECK11-NEXT: [[TMP42:%.*]] = load i32, i32* [[A]], align 4 14388 // CHECK11-NEXT: store i32 [[TMP42]], i32* [[DOTCAPTURE_EXPR_]], align 4 14389 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_LB69]], align 4 14390 // CHECK11-NEXT: store i32 25, i32* [[DOTOMP_UB70]], align 4 
14391 // CHECK11-NEXT: [[TMP43:%.*]] = load i32, i32* [[DOTOMP_LB69]], align 4 14392 // CHECK11-NEXT: store i32 [[TMP43]], i32* [[DOTOMP_IV71]], align 4 14393 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND73:%.*]] 14394 // CHECK11: omp.inner.for.cond73: 14395 // CHECK11-NEXT: [[TMP44:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16 14396 // CHECK11-NEXT: [[TMP45:%.*]] = load i32, i32* [[DOTOMP_UB70]], align 4, !llvm.access.group !16 14397 // CHECK11-NEXT: [[CMP74:%.*]] = icmp sle i32 [[TMP44]], [[TMP45]] 14398 // CHECK11-NEXT: br i1 [[CMP74]], label [[OMP_INNER_FOR_BODY75:%.*]], label [[OMP_INNER_FOR_END100:%.*]] 14399 // CHECK11: omp.inner.for.body75: 14400 // CHECK11-NEXT: [[TMP46:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16 14401 // CHECK11-NEXT: [[MUL76:%.*]] = mul nsw i32 [[TMP46]], 1 14402 // CHECK11-NEXT: [[SUB77:%.*]] = sub nsw i32 122, [[MUL76]] 14403 // CHECK11-NEXT: [[CONV78:%.*]] = trunc i32 [[SUB77]] to i8 14404 // CHECK11-NEXT: store i8 [[CONV78]], i8* [[IT72]], align 1, !llvm.access.group !16 14405 // CHECK11-NEXT: [[TMP47:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !16 14406 // CHECK11-NEXT: [[ADD79:%.*]] = add nsw i32 [[TMP47]], 1 14407 // CHECK11-NEXT: store i32 [[ADD79]], i32* [[A]], align 4, !llvm.access.group !16 14408 // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i32 0, i32 2 14409 // CHECK11-NEXT: [[TMP48:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !16 14410 // CHECK11-NEXT: [[CONV80:%.*]] = fpext float [[TMP48]] to double 14411 // CHECK11-NEXT: [[ADD81:%.*]] = fadd double [[CONV80]], 1.000000e+00 14412 // CHECK11-NEXT: [[CONV82:%.*]] = fptrunc double [[ADD81]] to float 14413 // CHECK11-NEXT: store float [[CONV82]], float* [[ARRAYIDX]], align 4, !llvm.access.group !16 14414 // CHECK11-NEXT: [[ARRAYIDX83:%.*]] = getelementptr inbounds float, float* [[VLA]], i32 3 14415 // CHECK11-NEXT: [[TMP49:%.*]] = load float, float* [[ARRAYIDX83]], align 4, !llvm.access.group !16 14416 // CHECK11-NEXT: [[CONV84:%.*]] = fpext float [[TMP49]] to double 14417 // CHECK11-NEXT: [[ADD85:%.*]] = fadd double [[CONV84]], 1.000000e+00 14418 // CHECK11-NEXT: [[CONV86:%.*]] = fptrunc double [[ADD85]] to float 14419 // CHECK11-NEXT: store float [[CONV86]], float* [[ARRAYIDX83]], align 4, !llvm.access.group !16 14420 // CHECK11-NEXT: [[ARRAYIDX87:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i32 0, i32 1 14421 // CHECK11-NEXT: [[ARRAYIDX88:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX87]], i32 0, i32 2 14422 // CHECK11-NEXT: [[TMP50:%.*]] = load double, double* [[ARRAYIDX88]], align 8, !llvm.access.group !16 14423 // CHECK11-NEXT: [[ADD89:%.*]] = fadd double [[TMP50]], 1.000000e+00 14424 // CHECK11-NEXT: store double [[ADD89]], double* [[ARRAYIDX88]], align 8, !llvm.access.group !16 14425 // CHECK11-NEXT: [[TMP51:%.*]] = mul nsw i32 1, [[TMP2]] 14426 // CHECK11-NEXT: [[ARRAYIDX90:%.*]] = getelementptr inbounds double, double* [[VLA1]], i32 [[TMP51]] 14427 // CHECK11-NEXT: [[ARRAYIDX91:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX90]], i32 3 14428 // CHECK11-NEXT: [[TMP52:%.*]] = load double, double* [[ARRAYIDX91]], align 8, !llvm.access.group !16 14429 // CHECK11-NEXT: [[ADD92:%.*]] = fadd double [[TMP52]], 1.000000e+00 14430 // CHECK11-NEXT: store double [[ADD92]], double* [[ARRAYIDX91]], align 8, !llvm.access.group !16 14431 // CHECK11-NEXT: [[X:%.*]] = getelementptr inbounds 
[[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0 14432 // CHECK11-NEXT: [[TMP53:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group !16 14433 // CHECK11-NEXT: [[ADD93:%.*]] = add nsw i64 [[TMP53]], 1 14434 // CHECK11-NEXT: store i64 [[ADD93]], i64* [[X]], align 4, !llvm.access.group !16 14435 // CHECK11-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1 14436 // CHECK11-NEXT: [[TMP54:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group !16 14437 // CHECK11-NEXT: [[CONV94:%.*]] = sext i8 [[TMP54]] to i32 14438 // CHECK11-NEXT: [[ADD95:%.*]] = add nsw i32 [[CONV94]], 1 14439 // CHECK11-NEXT: [[CONV96:%.*]] = trunc i32 [[ADD95]] to i8 14440 // CHECK11-NEXT: store i8 [[CONV96]], i8* [[Y]], align 4, !llvm.access.group !16 14441 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE97:%.*]] 14442 // CHECK11: omp.body.continue97: 14443 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC98:%.*]] 14444 // CHECK11: omp.inner.for.inc98: 14445 // CHECK11-NEXT: [[TMP55:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16 14446 // CHECK11-NEXT: [[ADD99:%.*]] = add nsw i32 [[TMP55]], 1 14447 // CHECK11-NEXT: store i32 [[ADD99]], i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16 14448 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND73]], !llvm.loop [[LOOP17:![0-9]+]] 14449 // CHECK11: omp.inner.for.end100: 14450 // CHECK11-NEXT: store i8 96, i8* [[IT72]], align 1 14451 // CHECK11-NEXT: [[TMP56:%.*]] = load i32, i32* [[A]], align 4 14452 // CHECK11-NEXT: [[TMP57:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 14453 // CHECK11-NEXT: call void @llvm.stackrestore(i8* [[TMP57]]) 14454 // CHECK11-NEXT: ret i32 [[TMP56]] 14455 // 14456 // 14457 // CHECK11-LABEL: define {{[^@]+}}@_Z3bari 14458 // CHECK11-SAME: (i32 [[N:%.*]]) #[[ATTR0]] { 14459 // CHECK11-NEXT: entry: 14460 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 14461 // CHECK11-NEXT: [[A:%.*]] = alloca i32, align 4 14462 // CHECK11-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4 14463 // CHECK11-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 14464 // CHECK11-NEXT: store i32 0, i32* [[A]], align 4 14465 // CHECK11-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 14466 // CHECK11-NEXT: [[CALL:%.*]] = call i32 @_Z3fooi(i32 [[TMP0]]) 14467 // CHECK11-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4 14468 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] 14469 // CHECK11-NEXT: store i32 [[ADD]], i32* [[A]], align 4 14470 // CHECK11-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 14471 // CHECK11-NEXT: [[CALL1:%.*]] = call i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 4 dereferenceable(8) [[S]], i32 [[TMP2]]) 14472 // CHECK11-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 14473 // CHECK11-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] 14474 // CHECK11-NEXT: store i32 [[ADD2]], i32* [[A]], align 4 14475 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 14476 // CHECK11-NEXT: [[CALL3:%.*]] = call i32 @_ZL7fstatici(i32 [[TMP4]]) 14477 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4 14478 // CHECK11-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] 14479 // CHECK11-NEXT: store i32 [[ADD4]], i32* [[A]], align 4 14480 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4 14481 // CHECK11-NEXT: [[CALL5:%.*]] = call i32 @_Z9ftemplateIiET_i(i32 [[TMP6]]) 14482 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4 14483 // CHECK11-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]] 14484 // CHECK11-NEXT: store i32 
[[ADD6]], i32* [[A]], align 4 14485 // CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4 14486 // CHECK11-NEXT: ret i32 [[TMP8]] 14487 // 14488 // 14489 // CHECK11-LABEL: define {{[^@]+}}@_ZN2S12r1Ei 14490 // CHECK11-SAME: (%struct.S1* nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 [[N:%.*]]) #[[ATTR0]] comdat align 2 { 14491 // CHECK11-NEXT: entry: 14492 // CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 14493 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 14494 // CHECK11-NEXT: [[B:%.*]] = alloca i32, align 4 14495 // CHECK11-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4 14496 // CHECK11-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4 14497 // CHECK11-NEXT: [[TMP:%.*]] = alloca i64, align 4 14498 // CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 14499 // CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 14500 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 14501 // CHECK11-NEXT: [[IT:%.*]] = alloca i64, align 8 14502 // CHECK11-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 14503 // CHECK11-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 14504 // CHECK11-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 14505 // CHECK11-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 14506 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 14507 // CHECK11-NEXT: store i32 [[ADD]], i32* [[B]], align 4 14508 // CHECK11-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 14509 // CHECK11-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave() 14510 // CHECK11-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4 14511 // CHECK11-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]] 14512 // CHECK11-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2 14513 // CHECK11-NEXT: store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4 14514 // CHECK11-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 14515 // CHECK11-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8 14516 // CHECK11-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 14517 // CHECK11-NEXT: store i64 [[TMP4]], i64* [[DOTOMP_IV]], align 8 14518 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 14519 // CHECK11: omp.inner.for.cond: 14520 // CHECK11-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19 14521 // CHECK11-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !19 14522 // CHECK11-NEXT: [[CMP:%.*]] = icmp ule i64 [[TMP5]], [[TMP6]] 14523 // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 14524 // CHECK11: omp.inner.for.body: 14525 // CHECK11-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19 14526 // CHECK11-NEXT: [[MUL:%.*]] = mul i64 [[TMP7]], 400 14527 // CHECK11-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] 14528 // CHECK11-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !19 14529 // CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group !19 14530 // CHECK11-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP8]] to double 14531 // CHECK11-NEXT: [[ADD2:%.*]] = fadd double [[CONV]], 1.500000e+00 14532 // CHECK11-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0 14533 // CHECK11-NEXT: store double [[ADD2]], double* [[A]], align 4, !llvm.access.group !19 14534 // CHECK11-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 14535 // CHECK11-NEXT: [[TMP9:%.*]] = load double, double* [[A3]], align 
4, !llvm.access.group !19 14536 // CHECK11-NEXT: [[INC:%.*]] = fadd double [[TMP9]], 1.000000e+00 14537 // CHECK11-NEXT: store double [[INC]], double* [[A3]], align 4, !llvm.access.group !19 14538 // CHECK11-NEXT: [[CONV4:%.*]] = fptosi double [[INC]] to i16 14539 // CHECK11-NEXT: [[TMP10:%.*]] = mul nsw i32 1, [[TMP1]] 14540 // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP10]] 14541 // CHECK11-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1 14542 // CHECK11-NEXT: store i16 [[CONV4]], i16* [[ARRAYIDX5]], align 2, !llvm.access.group !19 14543 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 14544 // CHECK11: omp.body.continue: 14545 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 14546 // CHECK11: omp.inner.for.inc: 14547 // CHECK11-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19 14548 // CHECK11-NEXT: [[ADD6:%.*]] = add i64 [[TMP11]], 1 14549 // CHECK11-NEXT: store i64 [[ADD6]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19 14550 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] 14551 // CHECK11: omp.inner.for.end: 14552 // CHECK11-NEXT: store i64 400, i64* [[IT]], align 8 14553 // CHECK11-NEXT: [[TMP12:%.*]] = mul nsw i32 1, [[TMP1]] 14554 // CHECK11-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP12]] 14555 // CHECK11-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX7]], i32 1 14556 // CHECK11-NEXT: [[TMP13:%.*]] = load i16, i16* [[ARRAYIDX8]], align 2 14557 // CHECK11-NEXT: [[CONV9:%.*]] = sext i16 [[TMP13]] to i32 14558 // CHECK11-NEXT: [[TMP14:%.*]] = load i32, i32* [[B]], align 4 14559 // CHECK11-NEXT: [[ADD10:%.*]] = add nsw i32 [[CONV9]], [[TMP14]] 14560 // CHECK11-NEXT: [[TMP15:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 14561 // CHECK11-NEXT: call void @llvm.stackrestore(i8* [[TMP15]]) 14562 // CHECK11-NEXT: ret i32 [[ADD10]] 14563 // 14564 // 14565 // CHECK11-LABEL: define {{[^@]+}}@_ZL7fstatici 14566 // CHECK11-SAME: (i32 [[N:%.*]]) #[[ATTR0]] { 14567 // CHECK11-NEXT: entry: 14568 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 14569 // CHECK11-NEXT: [[A:%.*]] = alloca i32, align 4 14570 // CHECK11-NEXT: [[AA:%.*]] = alloca i16, align 2 14571 // CHECK11-NEXT: [[AAA:%.*]] = alloca i8, align 1 14572 // CHECK11-NEXT: [[B:%.*]] = alloca [10 x i32], align 4 14573 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 14574 // CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 14575 // CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 14576 // CHECK11-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 14577 // CHECK11-NEXT: store i32 0, i32* [[A]], align 4 14578 // CHECK11-NEXT: store i16 0, i16* [[AA]], align 2 14579 // CHECK11-NEXT: store i8 0, i8* [[AAA]], align 1 14580 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 14581 // CHECK11-NEXT: store i32 429496720, i32* [[DOTOMP_UB]], align 4 14582 // CHECK11-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4 14583 // CHECK11-NEXT: ret i32 [[TMP0]] 14584 // 14585 // 14586 // CHECK11-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i 14587 // CHECK11-SAME: (i32 [[N:%.*]]) #[[ATTR0]] comdat { 14588 // CHECK11-NEXT: entry: 14589 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 14590 // CHECK11-NEXT: [[A:%.*]] = alloca i32, align 4 14591 // CHECK11-NEXT: [[AA:%.*]] = alloca i16, align 2 14592 // CHECK11-NEXT: [[B:%.*]] = alloca [10 x i32], align 4 14593 // CHECK11-NEXT: [[TMP:%.*]] = alloca i64, align 4 14594 // CHECK11-NEXT: 
[[DOTOMP_LB:%.*]] = alloca i64, align 8 14595 // CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 14596 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 14597 // CHECK11-NEXT: [[I:%.*]] = alloca i64, align 8 14598 // CHECK11-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 14599 // CHECK11-NEXT: store i32 0, i32* [[A]], align 4 14600 // CHECK11-NEXT: store i16 0, i16* [[AA]], align 2 14601 // CHECK11-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 14602 // CHECK11-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8 14603 // CHECK11-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 14604 // CHECK11-NEXT: store i64 [[TMP0]], i64* [[DOTOMP_IV]], align 8 14605 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 14606 // CHECK11: omp.inner.for.cond: 14607 // CHECK11-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !22 14608 // CHECK11-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !22 14609 // CHECK11-NEXT: [[CMP:%.*]] = icmp sle i64 [[TMP1]], [[TMP2]] 14610 // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 14611 // CHECK11: omp.inner.for.body: 14612 // CHECK11-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !22 14613 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP3]], 3 14614 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]] 14615 // CHECK11-NEXT: store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !22 14616 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !22 14617 // CHECK11-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 14618 // CHECK11-NEXT: store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group !22 14619 // CHECK11-NEXT: [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !22 14620 // CHECK11-NEXT: [[CONV:%.*]] = sext i16 [[TMP5]] to i32 14621 // CHECK11-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1 14622 // CHECK11-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16 14623 // CHECK11-NEXT: store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group !22 14624 // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i32 0, i32 2 14625 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !22 14626 // CHECK11-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1 14627 // CHECK11-NEXT: store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !22 14628 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 14629 // CHECK11: omp.body.continue: 14630 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 14631 // CHECK11: omp.inner.for.inc: 14632 // CHECK11-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !22 14633 // CHECK11-NEXT: [[ADD5:%.*]] = add nsw i64 [[TMP7]], 1 14634 // CHECK11-NEXT: store i64 [[ADD5]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !22 14635 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]] 14636 // CHECK11: omp.inner.for.end: 14637 // CHECK11-NEXT: store i64 11, i64* [[I]], align 8 14638 // CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4 14639 // CHECK11-NEXT: ret i32 [[TMP8]] 14640 // 14641 // 14642 // CHECK12-LABEL: define {{[^@]+}}@_Z7get_valv 14643 // CHECK12-SAME: () #[[ATTR0:[0-9]+]] { 14644 // CHECK12-NEXT: entry: 14645 // CHECK12-NEXT: ret i64 0 14646 // 14647 // 14648 // CHECK12-LABEL: define {{[^@]+}}@_Z3fooi 14649 // CHECK12-SAME: (i32 [[N:%.*]]) #[[ATTR0]] { 14650 // CHECK12-NEXT: entry: 14651 // CHECK12-NEXT: 
[[N_ADDR:%.*]] = alloca i32, align 4 14652 // CHECK12-NEXT: [[A:%.*]] = alloca i32, align 4 14653 // CHECK12-NEXT: [[AA:%.*]] = alloca i16, align 2 14654 // CHECK12-NEXT: [[B:%.*]] = alloca [10 x float], align 4 14655 // CHECK12-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4 14656 // CHECK12-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4 14657 // CHECK12-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8 14658 // CHECK12-NEXT: [[__VLA_EXPR1:%.*]] = alloca i32, align 4 14659 // CHECK12-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4 14660 // CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4 14661 // CHECK12-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 14662 // CHECK12-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 14663 // CHECK12-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 14664 // CHECK12-NEXT: [[I:%.*]] = alloca i32, align 4 14665 // CHECK12-NEXT: [[K:%.*]] = alloca i64, align 8 14666 // CHECK12-NEXT: [[_TMP3:%.*]] = alloca i32, align 4 14667 // CHECK12-NEXT: [[DOTOMP_LB4:%.*]] = alloca i32, align 4 14668 // CHECK12-NEXT: [[DOTOMP_UB5:%.*]] = alloca i32, align 4 14669 // CHECK12-NEXT: [[DOTOMP_IV6:%.*]] = alloca i32, align 4 14670 // CHECK12-NEXT: [[DOTLINEAR_START:%.*]] = alloca i64, align 8 14671 // CHECK12-NEXT: [[I7:%.*]] = alloca i32, align 4 14672 // CHECK12-NEXT: [[K8:%.*]] = alloca i64, align 8 14673 // CHECK12-NEXT: [[LIN:%.*]] = alloca i32, align 4 14674 // CHECK12-NEXT: [[_TMP20:%.*]] = alloca i64, align 4 14675 // CHECK12-NEXT: [[DOTOMP_LB21:%.*]] = alloca i64, align 8 14676 // CHECK12-NEXT: [[DOTOMP_UB22:%.*]] = alloca i64, align 8 14677 // CHECK12-NEXT: [[DOTOMP_IV23:%.*]] = alloca i64, align 8 14678 // CHECK12-NEXT: [[DOTLINEAR_START24:%.*]] = alloca i32, align 4 14679 // CHECK12-NEXT: [[DOTLINEAR_START25:%.*]] = alloca i32, align 4 14680 // CHECK12-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8 14681 // CHECK12-NEXT: [[IT:%.*]] = alloca i64, align 8 14682 // CHECK12-NEXT: [[LIN27:%.*]] = alloca i32, align 4 14683 // CHECK12-NEXT: [[A28:%.*]] = alloca i32, align 4 14684 // CHECK12-NEXT: [[_TMP49:%.*]] = alloca i16, align 2 14685 // CHECK12-NEXT: [[DOTOMP_LB50:%.*]] = alloca i32, align 4 14686 // CHECK12-NEXT: [[DOTOMP_UB51:%.*]] = alloca i32, align 4 14687 // CHECK12-NEXT: [[DOTOMP_IV52:%.*]] = alloca i32, align 4 14688 // CHECK12-NEXT: [[IT53:%.*]] = alloca i16, align 2 14689 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 14690 // CHECK12-NEXT: [[_TMP68:%.*]] = alloca i8, align 1 14691 // CHECK12-NEXT: [[DOTOMP_LB69:%.*]] = alloca i32, align 4 14692 // CHECK12-NEXT: [[DOTOMP_UB70:%.*]] = alloca i32, align 4 14693 // CHECK12-NEXT: [[DOTOMP_IV71:%.*]] = alloca i32, align 4 14694 // CHECK12-NEXT: [[IT72:%.*]] = alloca i8, align 1 14695 // CHECK12-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 14696 // CHECK12-NEXT: store i32 0, i32* [[A]], align 4 14697 // CHECK12-NEXT: store i16 0, i16* [[AA]], align 2 14698 // CHECK12-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 14699 // CHECK12-NEXT: [[TMP1:%.*]] = call i8* @llvm.stacksave() 14700 // CHECK12-NEXT: store i8* [[TMP1]], i8** [[SAVED_STACK]], align 4 14701 // CHECK12-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP0]], align 4 14702 // CHECK12-NEXT: store i32 [[TMP0]], i32* [[__VLA_EXPR0]], align 4 14703 // CHECK12-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 14704 // CHECK12-NEXT: [[TMP3:%.*]] = mul nuw i32 5, [[TMP2]] 14705 // CHECK12-NEXT: [[VLA1:%.*]] = alloca double, i32 [[TMP3]], align 8 14706 // CHECK12-NEXT: store i32 [[TMP2]], i32* [[__VLA_EXPR1]], align 4 14707 // 
CHECK12-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 14708 // CHECK12-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4 14709 // CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 14710 // CHECK12-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 14711 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 14712 // CHECK12: omp.inner.for.cond: 14713 // CHECK12-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3 14714 // CHECK12-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !3 14715 // CHECK12-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 14716 // CHECK12-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 14717 // CHECK12: omp.inner.for.body: 14718 // CHECK12-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3 14719 // CHECK12-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5 14720 // CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]] 14721 // CHECK12-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !3 14722 // CHECK12-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 14723 // CHECK12: omp.body.continue: 14724 // CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 14725 // CHECK12: omp.inner.for.inc: 14726 // CHECK12-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3 14727 // CHECK12-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 14728 // CHECK12-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3 14729 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] 14730 // CHECK12: omp.inner.for.end: 14731 // CHECK12-NEXT: store i32 33, i32* [[I]], align 4 14732 // CHECK12-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv() 14733 // CHECK12-NEXT: store i64 [[CALL]], i64* [[K]], align 8 14734 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_LB4]], align 4 14735 // CHECK12-NEXT: store i32 8, i32* [[DOTOMP_UB5]], align 4 14736 // CHECK12-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB4]], align 4 14737 // CHECK12-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_IV6]], align 4 14738 // CHECK12-NEXT: [[TMP10:%.*]] = load i64, i64* [[K]], align 8 14739 // CHECK12-NEXT: store i64 [[TMP10]], i64* [[DOTLINEAR_START]], align 8 14740 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND9:%.*]] 14741 // CHECK12: omp.inner.for.cond9: 14742 // CHECK12-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7 14743 // CHECK12-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB5]], align 4, !llvm.access.group !7 14744 // CHECK12-NEXT: [[CMP10:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]] 14745 // CHECK12-NEXT: br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END19:%.*]] 14746 // CHECK12: omp.inner.for.body11: 14747 // CHECK12-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7 14748 // CHECK12-NEXT: [[MUL12:%.*]] = mul nsw i32 [[TMP13]], 1 14749 // CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[MUL12]] 14750 // CHECK12-NEXT: store i32 [[SUB]], i32* [[I7]], align 4, !llvm.access.group !7 14751 // CHECK12-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !7 14752 // CHECK12-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7 14753 // CHECK12-NEXT: [[MUL13:%.*]] = mul nsw i32 [[TMP15]], 3 14754 // CHECK12-NEXT: [[CONV:%.*]] = sext i32 [[MUL13]] to i64 14755 // CHECK12-NEXT: [[ADD14:%.*]] = add nsw i64 [[TMP14]], [[CONV]] 14756 // CHECK12-NEXT: store i64 [[ADD14]], i64* [[K8]], 
align 8, !llvm.access.group !7 14757 // CHECK12-NEXT: [[TMP16:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !7 14758 // CHECK12-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP16]], 1 14759 // CHECK12-NEXT: store i32 [[ADD15]], i32* [[A]], align 4, !llvm.access.group !7 14760 // CHECK12-NEXT: br label [[OMP_BODY_CONTINUE16:%.*]] 14761 // CHECK12: omp.body.continue16: 14762 // CHECK12-NEXT: br label [[OMP_INNER_FOR_INC17:%.*]] 14763 // CHECK12: omp.inner.for.inc17: 14764 // CHECK12-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7 14765 // CHECK12-NEXT: [[ADD18:%.*]] = add nsw i32 [[TMP17]], 1 14766 // CHECK12-NEXT: store i32 [[ADD18]], i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7 14767 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP8:![0-9]+]] 14768 // CHECK12: omp.inner.for.end19: 14769 // CHECK12-NEXT: store i32 1, i32* [[I7]], align 4 14770 // CHECK12-NEXT: [[TMP18:%.*]] = load i64, i64* [[K8]], align 8 14771 // CHECK12-NEXT: store i64 [[TMP18]], i64* [[K]], align 8 14772 // CHECK12-NEXT: store i32 12, i32* [[LIN]], align 4 14773 // CHECK12-NEXT: store i64 0, i64* [[DOTOMP_LB21]], align 8 14774 // CHECK12-NEXT: store i64 3, i64* [[DOTOMP_UB22]], align 8 14775 // CHECK12-NEXT: [[TMP19:%.*]] = load i64, i64* [[DOTOMP_LB21]], align 8 14776 // CHECK12-NEXT: store i64 [[TMP19]], i64* [[DOTOMP_IV23]], align 8 14777 // CHECK12-NEXT: [[TMP20:%.*]] = load i32, i32* [[LIN]], align 4 14778 // CHECK12-NEXT: store i32 [[TMP20]], i32* [[DOTLINEAR_START24]], align 4 14779 // CHECK12-NEXT: [[TMP21:%.*]] = load i32, i32* [[A]], align 4 14780 // CHECK12-NEXT: store i32 [[TMP21]], i32* [[DOTLINEAR_START25]], align 4 14781 // CHECK12-NEXT: [[CALL26:%.*]] = call i64 @_Z7get_valv() 14782 // CHECK12-NEXT: store i64 [[CALL26]], i64* [[DOTLINEAR_STEP]], align 8 14783 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND29:%.*]] 14784 // CHECK12: omp.inner.for.cond29: 14785 // CHECK12-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10 14786 // CHECK12-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTOMP_UB22]], align 8, !llvm.access.group !10 14787 // CHECK12-NEXT: [[CMP30:%.*]] = icmp ule i64 [[TMP22]], [[TMP23]] 14788 // CHECK12-NEXT: br i1 [[CMP30]], label [[OMP_INNER_FOR_BODY31:%.*]], label [[OMP_INNER_FOR_END48:%.*]] 14789 // CHECK12: omp.inner.for.body31: 14790 // CHECK12-NEXT: [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10 14791 // CHECK12-NEXT: [[MUL32:%.*]] = mul i64 [[TMP24]], 400 14792 // CHECK12-NEXT: [[SUB33:%.*]] = sub i64 2000, [[MUL32]] 14793 // CHECK12-NEXT: store i64 [[SUB33]], i64* [[IT]], align 8, !llvm.access.group !10 14794 // CHECK12-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTLINEAR_START24]], align 4, !llvm.access.group !10 14795 // CHECK12-NEXT: [[CONV34:%.*]] = sext i32 [[TMP25]] to i64 14796 // CHECK12-NEXT: [[TMP26:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10 14797 // CHECK12-NEXT: [[TMP27:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !10 14798 // CHECK12-NEXT: [[MUL35:%.*]] = mul i64 [[TMP26]], [[TMP27]] 14799 // CHECK12-NEXT: [[ADD36:%.*]] = add i64 [[CONV34]], [[MUL35]] 14800 // CHECK12-NEXT: [[CONV37:%.*]] = trunc i64 [[ADD36]] to i32 14801 // CHECK12-NEXT: store i32 [[CONV37]], i32* [[LIN27]], align 4, !llvm.access.group !10 14802 // CHECK12-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTLINEAR_START25]], align 4, !llvm.access.group !10 14803 // CHECK12-NEXT: [[CONV38:%.*]] = sext i32 [[TMP28]] to i64 14804 // 
CHECK12-NEXT: [[TMP29:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10 14805 // CHECK12-NEXT: [[TMP30:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !10 14806 // CHECK12-NEXT: [[MUL39:%.*]] = mul i64 [[TMP29]], [[TMP30]] 14807 // CHECK12-NEXT: [[ADD40:%.*]] = add i64 [[CONV38]], [[MUL39]] 14808 // CHECK12-NEXT: [[CONV41:%.*]] = trunc i64 [[ADD40]] to i32 14809 // CHECK12-NEXT: store i32 [[CONV41]], i32* [[A28]], align 4, !llvm.access.group !10 14810 // CHECK12-NEXT: [[TMP31:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !10 14811 // CHECK12-NEXT: [[CONV42:%.*]] = sext i16 [[TMP31]] to i32 14812 // CHECK12-NEXT: [[ADD43:%.*]] = add nsw i32 [[CONV42]], 1 14813 // CHECK12-NEXT: [[CONV44:%.*]] = trunc i32 [[ADD43]] to i16 14814 // CHECK12-NEXT: store i16 [[CONV44]], i16* [[AA]], align 2, !llvm.access.group !10 14815 // CHECK12-NEXT: br label [[OMP_BODY_CONTINUE45:%.*]] 14816 // CHECK12: omp.body.continue45: 14817 // CHECK12-NEXT: br label [[OMP_INNER_FOR_INC46:%.*]] 14818 // CHECK12: omp.inner.for.inc46: 14819 // CHECK12-NEXT: [[TMP32:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10 14820 // CHECK12-NEXT: [[ADD47:%.*]] = add i64 [[TMP32]], 1 14821 // CHECK12-NEXT: store i64 [[ADD47]], i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10 14822 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP11:![0-9]+]] 14823 // CHECK12: omp.inner.for.end48: 14824 // CHECK12-NEXT: store i64 400, i64* [[IT]], align 8 14825 // CHECK12-NEXT: [[TMP33:%.*]] = load i32, i32* [[LIN27]], align 4 14826 // CHECK12-NEXT: store i32 [[TMP33]], i32* [[LIN]], align 4 14827 // CHECK12-NEXT: [[TMP34:%.*]] = load i32, i32* [[A28]], align 4 14828 // CHECK12-NEXT: store i32 [[TMP34]], i32* [[A]], align 4 14829 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_LB50]], align 4 14830 // CHECK12-NEXT: store i32 3, i32* [[DOTOMP_UB51]], align 4 14831 // CHECK12-NEXT: [[TMP35:%.*]] = load i32, i32* [[DOTOMP_LB50]], align 4 14832 // CHECK12-NEXT: store i32 [[TMP35]], i32* [[DOTOMP_IV52]], align 4 14833 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND54:%.*]] 14834 // CHECK12: omp.inner.for.cond54: 14835 // CHECK12-NEXT: [[TMP36:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13 14836 // CHECK12-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTOMP_UB51]], align 4, !llvm.access.group !13 14837 // CHECK12-NEXT: [[CMP55:%.*]] = icmp sle i32 [[TMP36]], [[TMP37]] 14838 // CHECK12-NEXT: br i1 [[CMP55]], label [[OMP_INNER_FOR_BODY56:%.*]], label [[OMP_INNER_FOR_END67:%.*]] 14839 // CHECK12: omp.inner.for.body56: 14840 // CHECK12-NEXT: [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13 14841 // CHECK12-NEXT: [[MUL57:%.*]] = mul nsw i32 [[TMP38]], 4 14842 // CHECK12-NEXT: [[ADD58:%.*]] = add nsw i32 6, [[MUL57]] 14843 // CHECK12-NEXT: [[CONV59:%.*]] = trunc i32 [[ADD58]] to i16 14844 // CHECK12-NEXT: store i16 [[CONV59]], i16* [[IT53]], align 2, !llvm.access.group !13 14845 // CHECK12-NEXT: [[TMP39:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !13 14846 // CHECK12-NEXT: [[ADD60:%.*]] = add nsw i32 [[TMP39]], 1 14847 // CHECK12-NEXT: store i32 [[ADD60]], i32* [[A]], align 4, !llvm.access.group !13 14848 // CHECK12-NEXT: [[TMP40:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !13 14849 // CHECK12-NEXT: [[CONV61:%.*]] = sext i16 [[TMP40]] to i32 14850 // CHECK12-NEXT: [[ADD62:%.*]] = add nsw i32 [[CONV61]], 1 14851 // CHECK12-NEXT: [[CONV63:%.*]] = trunc i32 [[ADD62]] to i16 14852 // 
CHECK12-NEXT: store i16 [[CONV63]], i16* [[AA]], align 2, !llvm.access.group !13 14853 // CHECK12-NEXT: br label [[OMP_BODY_CONTINUE64:%.*]] 14854 // CHECK12: omp.body.continue64: 14855 // CHECK12-NEXT: br label [[OMP_INNER_FOR_INC65:%.*]] 14856 // CHECK12: omp.inner.for.inc65: 14857 // CHECK12-NEXT: [[TMP41:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13 14858 // CHECK12-NEXT: [[ADD66:%.*]] = add nsw i32 [[TMP41]], 1 14859 // CHECK12-NEXT: store i32 [[ADD66]], i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13 14860 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND54]], !llvm.loop [[LOOP14:![0-9]+]] 14861 // CHECK12: omp.inner.for.end67: 14862 // CHECK12-NEXT: store i16 22, i16* [[IT53]], align 2 14863 // CHECK12-NEXT: [[TMP42:%.*]] = load i32, i32* [[A]], align 4 14864 // CHECK12-NEXT: store i32 [[TMP42]], i32* [[DOTCAPTURE_EXPR_]], align 4 14865 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_LB69]], align 4 14866 // CHECK12-NEXT: store i32 25, i32* [[DOTOMP_UB70]], align 4 14867 // CHECK12-NEXT: [[TMP43:%.*]] = load i32, i32* [[DOTOMP_LB69]], align 4 14868 // CHECK12-NEXT: store i32 [[TMP43]], i32* [[DOTOMP_IV71]], align 4 14869 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND73:%.*]] 14870 // CHECK12: omp.inner.for.cond73: 14871 // CHECK12-NEXT: [[TMP44:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16 14872 // CHECK12-NEXT: [[TMP45:%.*]] = load i32, i32* [[DOTOMP_UB70]], align 4, !llvm.access.group !16 14873 // CHECK12-NEXT: [[CMP74:%.*]] = icmp sle i32 [[TMP44]], [[TMP45]] 14874 // CHECK12-NEXT: br i1 [[CMP74]], label [[OMP_INNER_FOR_BODY75:%.*]], label [[OMP_INNER_FOR_END100:%.*]] 14875 // CHECK12: omp.inner.for.body75: 14876 // CHECK12-NEXT: [[TMP46:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16 14877 // CHECK12-NEXT: [[MUL76:%.*]] = mul nsw i32 [[TMP46]], 1 14878 // CHECK12-NEXT: [[SUB77:%.*]] = sub nsw i32 122, [[MUL76]] 14879 // CHECK12-NEXT: [[CONV78:%.*]] = trunc i32 [[SUB77]] to i8 14880 // CHECK12-NEXT: store i8 [[CONV78]], i8* [[IT72]], align 1, !llvm.access.group !16 14881 // CHECK12-NEXT: [[TMP47:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !16 14882 // CHECK12-NEXT: [[ADD79:%.*]] = add nsw i32 [[TMP47]], 1 14883 // CHECK12-NEXT: store i32 [[ADD79]], i32* [[A]], align 4, !llvm.access.group !16 14884 // CHECK12-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i32 0, i32 2 14885 // CHECK12-NEXT: [[TMP48:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !16 14886 // CHECK12-NEXT: [[CONV80:%.*]] = fpext float [[TMP48]] to double 14887 // CHECK12-NEXT: [[ADD81:%.*]] = fadd double [[CONV80]], 1.000000e+00 14888 // CHECK12-NEXT: [[CONV82:%.*]] = fptrunc double [[ADD81]] to float 14889 // CHECK12-NEXT: store float [[CONV82]], float* [[ARRAYIDX]], align 4, !llvm.access.group !16 14890 // CHECK12-NEXT: [[ARRAYIDX83:%.*]] = getelementptr inbounds float, float* [[VLA]], i32 3 14891 // CHECK12-NEXT: [[TMP49:%.*]] = load float, float* [[ARRAYIDX83]], align 4, !llvm.access.group !16 14892 // CHECK12-NEXT: [[CONV84:%.*]] = fpext float [[TMP49]] to double 14893 // CHECK12-NEXT: [[ADD85:%.*]] = fadd double [[CONV84]], 1.000000e+00 14894 // CHECK12-NEXT: [[CONV86:%.*]] = fptrunc double [[ADD85]] to float 14895 // CHECK12-NEXT: store float [[CONV86]], float* [[ARRAYIDX83]], align 4, !llvm.access.group !16 14896 // CHECK12-NEXT: [[ARRAYIDX87:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i32 0, i32 1 14897 // CHECK12-NEXT: 
[[ARRAYIDX88:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX87]], i32 0, i32 2 14898 // CHECK12-NEXT: [[TMP50:%.*]] = load double, double* [[ARRAYIDX88]], align 8, !llvm.access.group !16 14899 // CHECK12-NEXT: [[ADD89:%.*]] = fadd double [[TMP50]], 1.000000e+00 14900 // CHECK12-NEXT: store double [[ADD89]], double* [[ARRAYIDX88]], align 8, !llvm.access.group !16 14901 // CHECK12-NEXT: [[TMP51:%.*]] = mul nsw i32 1, [[TMP2]] 14902 // CHECK12-NEXT: [[ARRAYIDX90:%.*]] = getelementptr inbounds double, double* [[VLA1]], i32 [[TMP51]] 14903 // CHECK12-NEXT: [[ARRAYIDX91:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX90]], i32 3 14904 // CHECK12-NEXT: [[TMP52:%.*]] = load double, double* [[ARRAYIDX91]], align 8, !llvm.access.group !16 14905 // CHECK12-NEXT: [[ADD92:%.*]] = fadd double [[TMP52]], 1.000000e+00 14906 // CHECK12-NEXT: store double [[ADD92]], double* [[ARRAYIDX91]], align 8, !llvm.access.group !16 14907 // CHECK12-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0 14908 // CHECK12-NEXT: [[TMP53:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group !16 14909 // CHECK12-NEXT: [[ADD93:%.*]] = add nsw i64 [[TMP53]], 1 14910 // CHECK12-NEXT: store i64 [[ADD93]], i64* [[X]], align 4, !llvm.access.group !16 14911 // CHECK12-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1 14912 // CHECK12-NEXT: [[TMP54:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group !16 14913 // CHECK12-NEXT: [[CONV94:%.*]] = sext i8 [[TMP54]] to i32 14914 // CHECK12-NEXT: [[ADD95:%.*]] = add nsw i32 [[CONV94]], 1 14915 // CHECK12-NEXT: [[CONV96:%.*]] = trunc i32 [[ADD95]] to i8 14916 // CHECK12-NEXT: store i8 [[CONV96]], i8* [[Y]], align 4, !llvm.access.group !16 14917 // CHECK12-NEXT: br label [[OMP_BODY_CONTINUE97:%.*]] 14918 // CHECK12: omp.body.continue97: 14919 // CHECK12-NEXT: br label [[OMP_INNER_FOR_INC98:%.*]] 14920 // CHECK12: omp.inner.for.inc98: 14921 // CHECK12-NEXT: [[TMP55:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16 14922 // CHECK12-NEXT: [[ADD99:%.*]] = add nsw i32 [[TMP55]], 1 14923 // CHECK12-NEXT: store i32 [[ADD99]], i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16 14924 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND73]], !llvm.loop [[LOOP17:![0-9]+]] 14925 // CHECK12: omp.inner.for.end100: 14926 // CHECK12-NEXT: store i8 96, i8* [[IT72]], align 1 14927 // CHECK12-NEXT: [[TMP56:%.*]] = load i32, i32* [[A]], align 4 14928 // CHECK12-NEXT: [[TMP57:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 14929 // CHECK12-NEXT: call void @llvm.stackrestore(i8* [[TMP57]]) 14930 // CHECK12-NEXT: ret i32 [[TMP56]] 14931 // 14932 // 14933 // CHECK12-LABEL: define {{[^@]+}}@_Z3bari 14934 // CHECK12-SAME: (i32 [[N:%.*]]) #[[ATTR0]] { 14935 // CHECK12-NEXT: entry: 14936 // CHECK12-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 14937 // CHECK12-NEXT: [[A:%.*]] = alloca i32, align 4 14938 // CHECK12-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4 14939 // CHECK12-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 14940 // CHECK12-NEXT: store i32 0, i32* [[A]], align 4 14941 // CHECK12-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 14942 // CHECK12-NEXT: [[CALL:%.*]] = call i32 @_Z3fooi(i32 [[TMP0]]) 14943 // CHECK12-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4 14944 // CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] 14945 // CHECK12-NEXT: store i32 [[ADD]], i32* [[A]], align 4 14946 // CHECK12-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 
14947 // CHECK12-NEXT: [[CALL1:%.*]] = call i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 4 dereferenceable(8) [[S]], i32 [[TMP2]]) 14948 // CHECK12-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 14949 // CHECK12-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] 14950 // CHECK12-NEXT: store i32 [[ADD2]], i32* [[A]], align 4 14951 // CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 14952 // CHECK12-NEXT: [[CALL3:%.*]] = call i32 @_ZL7fstatici(i32 [[TMP4]]) 14953 // CHECK12-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4 14954 // CHECK12-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] 14955 // CHECK12-NEXT: store i32 [[ADD4]], i32* [[A]], align 4 14956 // CHECK12-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4 14957 // CHECK12-NEXT: [[CALL5:%.*]] = call i32 @_Z9ftemplateIiET_i(i32 [[TMP6]]) 14958 // CHECK12-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4 14959 // CHECK12-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]] 14960 // CHECK12-NEXT: store i32 [[ADD6]], i32* [[A]], align 4 14961 // CHECK12-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4 14962 // CHECK12-NEXT: ret i32 [[TMP8]] 14963 // 14964 // 14965 // CHECK12-LABEL: define {{[^@]+}}@_ZN2S12r1Ei 14966 // CHECK12-SAME: (%struct.S1* nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 [[N:%.*]]) #[[ATTR0]] comdat align 2 { 14967 // CHECK12-NEXT: entry: 14968 // CHECK12-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 14969 // CHECK12-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 14970 // CHECK12-NEXT: [[B:%.*]] = alloca i32, align 4 14971 // CHECK12-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4 14972 // CHECK12-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4 14973 // CHECK12-NEXT: [[TMP:%.*]] = alloca i64, align 4 14974 // CHECK12-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 14975 // CHECK12-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 14976 // CHECK12-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 14977 // CHECK12-NEXT: [[IT:%.*]] = alloca i64, align 8 14978 // CHECK12-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 14979 // CHECK12-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 14980 // CHECK12-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 14981 // CHECK12-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 14982 // CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 14983 // CHECK12-NEXT: store i32 [[ADD]], i32* [[B]], align 4 14984 // CHECK12-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 14985 // CHECK12-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave() 14986 // CHECK12-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4 14987 // CHECK12-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]] 14988 // CHECK12-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2 14989 // CHECK12-NEXT: store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4 14990 // CHECK12-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 14991 // CHECK12-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8 14992 // CHECK12-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 14993 // CHECK12-NEXT: store i64 [[TMP4]], i64* [[DOTOMP_IV]], align 8 14994 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 14995 // CHECK12: omp.inner.for.cond: 14996 // CHECK12-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19 14997 // CHECK12-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !19 14998 // CHECK12-NEXT: [[CMP:%.*]] = icmp ule i64 [[TMP5]], [[TMP6]] 14999 // CHECK12-NEXT: br i1 [[CMP]], label 
[[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 15000 // CHECK12: omp.inner.for.body: 15001 // CHECK12-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19 15002 // CHECK12-NEXT: [[MUL:%.*]] = mul i64 [[TMP7]], 400 15003 // CHECK12-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] 15004 // CHECK12-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !19 15005 // CHECK12-NEXT: [[TMP8:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group !19 15006 // CHECK12-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP8]] to double 15007 // CHECK12-NEXT: [[ADD2:%.*]] = fadd double [[CONV]], 1.500000e+00 15008 // CHECK12-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0 15009 // CHECK12-NEXT: store double [[ADD2]], double* [[A]], align 4, !llvm.access.group !19 15010 // CHECK12-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 15011 // CHECK12-NEXT: [[TMP9:%.*]] = load double, double* [[A3]], align 4, !llvm.access.group !19 15012 // CHECK12-NEXT: [[INC:%.*]] = fadd double [[TMP9]], 1.000000e+00 15013 // CHECK12-NEXT: store double [[INC]], double* [[A3]], align 4, !llvm.access.group !19 15014 // CHECK12-NEXT: [[CONV4:%.*]] = fptosi double [[INC]] to i16 15015 // CHECK12-NEXT: [[TMP10:%.*]] = mul nsw i32 1, [[TMP1]] 15016 // CHECK12-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP10]] 15017 // CHECK12-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1 15018 // CHECK12-NEXT: store i16 [[CONV4]], i16* [[ARRAYIDX5]], align 2, !llvm.access.group !19 15019 // CHECK12-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 15020 // CHECK12: omp.body.continue: 15021 // CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 15022 // CHECK12: omp.inner.for.inc: 15023 // CHECK12-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19 15024 // CHECK12-NEXT: [[ADD6:%.*]] = add i64 [[TMP11]], 1 15025 // CHECK12-NEXT: store i64 [[ADD6]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19 15026 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] 15027 // CHECK12: omp.inner.for.end: 15028 // CHECK12-NEXT: store i64 400, i64* [[IT]], align 8 15029 // CHECK12-NEXT: [[TMP12:%.*]] = mul nsw i32 1, [[TMP1]] 15030 // CHECK12-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP12]] 15031 // CHECK12-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX7]], i32 1 15032 // CHECK12-NEXT: [[TMP13:%.*]] = load i16, i16* [[ARRAYIDX8]], align 2 15033 // CHECK12-NEXT: [[CONV9:%.*]] = sext i16 [[TMP13]] to i32 15034 // CHECK12-NEXT: [[TMP14:%.*]] = load i32, i32* [[B]], align 4 15035 // CHECK12-NEXT: [[ADD10:%.*]] = add nsw i32 [[CONV9]], [[TMP14]] 15036 // CHECK12-NEXT: [[TMP15:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 15037 // CHECK12-NEXT: call void @llvm.stackrestore(i8* [[TMP15]]) 15038 // CHECK12-NEXT: ret i32 [[ADD10]] 15039 // 15040 // 15041 // CHECK12-LABEL: define {{[^@]+}}@_ZL7fstatici 15042 // CHECK12-SAME: (i32 [[N:%.*]]) #[[ATTR0]] { 15043 // CHECK12-NEXT: entry: 15044 // CHECK12-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 15045 // CHECK12-NEXT: [[A:%.*]] = alloca i32, align 4 15046 // CHECK12-NEXT: [[AA:%.*]] = alloca i16, align 2 15047 // CHECK12-NEXT: [[AAA:%.*]] = alloca i8, align 1 15048 // CHECK12-NEXT: [[B:%.*]] = alloca [10 x i32], align 4 15049 // CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4 15050 // CHECK12-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 
4 15051 // CHECK12-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 15052 // CHECK12-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 15053 // CHECK12-NEXT: store i32 0, i32* [[A]], align 4 15054 // CHECK12-NEXT: store i16 0, i16* [[AA]], align 2 15055 // CHECK12-NEXT: store i8 0, i8* [[AAA]], align 1 15056 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 15057 // CHECK12-NEXT: store i32 429496720, i32* [[DOTOMP_UB]], align 4 15058 // CHECK12-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4 15059 // CHECK12-NEXT: ret i32 [[TMP0]] 15060 // 15061 // 15062 // CHECK12-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i 15063 // CHECK12-SAME: (i32 [[N:%.*]]) #[[ATTR0]] comdat { 15064 // CHECK12-NEXT: entry: 15065 // CHECK12-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 15066 // CHECK12-NEXT: [[A:%.*]] = alloca i32, align 4 15067 // CHECK12-NEXT: [[AA:%.*]] = alloca i16, align 2 15068 // CHECK12-NEXT: [[B:%.*]] = alloca [10 x i32], align 4 15069 // CHECK12-NEXT: [[TMP:%.*]] = alloca i64, align 4 15070 // CHECK12-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 15071 // CHECK12-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 15072 // CHECK12-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 15073 // CHECK12-NEXT: [[I:%.*]] = alloca i64, align 8 15074 // CHECK12-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 15075 // CHECK12-NEXT: store i32 0, i32* [[A]], align 4 15076 // CHECK12-NEXT: store i16 0, i16* [[AA]], align 2 15077 // CHECK12-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 15078 // CHECK12-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8 15079 // CHECK12-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 15080 // CHECK12-NEXT: store i64 [[TMP0]], i64* [[DOTOMP_IV]], align 8 15081 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 15082 // CHECK12: omp.inner.for.cond: 15083 // CHECK12-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !22 15084 // CHECK12-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !22 15085 // CHECK12-NEXT: [[CMP:%.*]] = icmp sle i64 [[TMP1]], [[TMP2]] 15086 // CHECK12-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 15087 // CHECK12: omp.inner.for.body: 15088 // CHECK12-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !22 15089 // CHECK12-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP3]], 3 15090 // CHECK12-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]] 15091 // CHECK12-NEXT: store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !22 15092 // CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !22 15093 // CHECK12-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 15094 // CHECK12-NEXT: store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group !22 15095 // CHECK12-NEXT: [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !22 15096 // CHECK12-NEXT: [[CONV:%.*]] = sext i16 [[TMP5]] to i32 15097 // CHECK12-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1 15098 // CHECK12-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16 15099 // CHECK12-NEXT: store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group !22 15100 // CHECK12-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i32 0, i32 2 15101 // CHECK12-NEXT: [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !22 15102 // CHECK12-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1 15103 // CHECK12-NEXT: store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !22 15104 // CHECK12-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 15105 
// CHECK12: omp.body.continue: 15106 // CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 15107 // CHECK12: omp.inner.for.inc: 15108 // CHECK12-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !22 15109 // CHECK12-NEXT: [[ADD5:%.*]] = add nsw i64 [[TMP7]], 1 15110 // CHECK12-NEXT: store i64 [[ADD5]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !22 15111 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]] 15112 // CHECK12: omp.inner.for.end: 15113 // CHECK12-NEXT: store i64 11, i64* [[I]], align 8 15114 // CHECK12-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4 15115 // CHECK12-NEXT: ret i32 [[TMP8]] 15116 // 15117 // 15118 // CHECK13-LABEL: define {{[^@]+}}@_Z7get_valv 15119 // CHECK13-SAME: () #[[ATTR0:[0-9]+]] { 15120 // CHECK13-NEXT: entry: 15121 // CHECK13-NEXT: ret i64 0 15122 // 15123 // 15124 // CHECK13-LABEL: define {{[^@]+}}@_Z3fooi 15125 // CHECK13-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] { 15126 // CHECK13-NEXT: entry: 15127 // CHECK13-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 15128 // CHECK13-NEXT: [[A:%.*]] = alloca i32, align 4 15129 // CHECK13-NEXT: [[AA:%.*]] = alloca i16, align 2 15130 // CHECK13-NEXT: [[B:%.*]] = alloca [10 x float], align 4 15131 // CHECK13-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8 15132 // CHECK13-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8 15133 // CHECK13-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8 15134 // CHECK13-NEXT: [[__VLA_EXPR1:%.*]] = alloca i64, align 8 15135 // CHECK13-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8 15136 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4 15137 // CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 15138 // CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 15139 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 15140 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4 15141 // CHECK13-NEXT: [[K:%.*]] = alloca i64, align 8 15142 // CHECK13-NEXT: [[_TMP3:%.*]] = alloca i32, align 4 15143 // CHECK13-NEXT: [[DOTOMP_LB4:%.*]] = alloca i32, align 4 15144 // CHECK13-NEXT: [[DOTOMP_UB5:%.*]] = alloca i32, align 4 15145 // CHECK13-NEXT: [[DOTOMP_IV6:%.*]] = alloca i32, align 4 15146 // CHECK13-NEXT: [[DOTLINEAR_START:%.*]] = alloca i64, align 8 15147 // CHECK13-NEXT: [[I7:%.*]] = alloca i32, align 4 15148 // CHECK13-NEXT: [[K8:%.*]] = alloca i64, align 8 15149 // CHECK13-NEXT: [[LIN:%.*]] = alloca i32, align 4 15150 // CHECK13-NEXT: [[_TMP20:%.*]] = alloca i64, align 8 15151 // CHECK13-NEXT: [[DOTOMP_LB21:%.*]] = alloca i64, align 8 15152 // CHECK13-NEXT: [[DOTOMP_UB22:%.*]] = alloca i64, align 8 15153 // CHECK13-NEXT: [[DOTOMP_IV23:%.*]] = alloca i64, align 8 15154 // CHECK13-NEXT: [[DOTLINEAR_START24:%.*]] = alloca i32, align 4 15155 // CHECK13-NEXT: [[DOTLINEAR_START25:%.*]] = alloca i32, align 4 15156 // CHECK13-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8 15157 // CHECK13-NEXT: [[IT:%.*]] = alloca i64, align 8 15158 // CHECK13-NEXT: [[LIN27:%.*]] = alloca i32, align 4 15159 // CHECK13-NEXT: [[A28:%.*]] = alloca i32, align 4 15160 // CHECK13-NEXT: [[_TMP49:%.*]] = alloca i16, align 2 15161 // CHECK13-NEXT: [[DOTOMP_LB50:%.*]] = alloca i32, align 4 15162 // CHECK13-NEXT: [[DOTOMP_UB51:%.*]] = alloca i32, align 4 15163 // CHECK13-NEXT: [[DOTOMP_IV52:%.*]] = alloca i32, align 4 15164 // CHECK13-NEXT: [[IT53:%.*]] = alloca i16, align 2 15165 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 15166 // CHECK13-NEXT: [[_TMP68:%.*]] = alloca i8, align 1 15167 // CHECK13-NEXT: [[DOTOMP_LB69:%.*]] = alloca 
i32, align 4 15168 // CHECK13-NEXT: [[DOTOMP_UB70:%.*]] = alloca i32, align 4 15169 // CHECK13-NEXT: [[DOTOMP_IV71:%.*]] = alloca i32, align 4 15170 // CHECK13-NEXT: [[IT72:%.*]] = alloca i8, align 1 15171 // CHECK13-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 15172 // CHECK13-NEXT: store i32 0, i32* [[A]], align 4 15173 // CHECK13-NEXT: store i16 0, i16* [[AA]], align 2 15174 // CHECK13-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 15175 // CHECK13-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 15176 // CHECK13-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave() 15177 // CHECK13-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8 15178 // CHECK13-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4 15179 // CHECK13-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8 15180 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4 15181 // CHECK13-NEXT: [[TMP4:%.*]] = zext i32 [[TMP3]] to i64 15182 // CHECK13-NEXT: [[TMP5:%.*]] = mul nuw i64 5, [[TMP4]] 15183 // CHECK13-NEXT: [[VLA1:%.*]] = alloca double, i64 [[TMP5]], align 8 15184 // CHECK13-NEXT: store i64 [[TMP4]], i64* [[__VLA_EXPR1]], align 8 15185 // CHECK13-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 15186 // CHECK13-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4 15187 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 15188 // CHECK13-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 15189 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 15190 // CHECK13: omp.inner.for.cond: 15191 // CHECK13-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 15192 // CHECK13-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !2 15193 // CHECK13-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 15194 // CHECK13-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 15195 // CHECK13: omp.inner.for.body: 15196 // CHECK13-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 15197 // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 5 15198 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]] 15199 // CHECK13-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !2 15200 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 15201 // CHECK13: omp.body.continue: 15202 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 15203 // CHECK13: omp.inner.for.inc: 15204 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 15205 // CHECK13-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], 1 15206 // CHECK13-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 15207 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] 15208 // CHECK13: omp.inner.for.end: 15209 // CHECK13-NEXT: store i32 33, i32* [[I]], align 4 15210 // CHECK13-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv() 15211 // CHECK13-NEXT: store i64 [[CALL]], i64* [[K]], align 8 15212 // CHECK13-NEXT: store i32 0, i32* [[DOTOMP_LB4]], align 4 15213 // CHECK13-NEXT: store i32 8, i32* [[DOTOMP_UB5]], align 4 15214 // CHECK13-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB4]], align 4 15215 // CHECK13-NEXT: store i32 [[TMP11]], i32* [[DOTOMP_IV6]], align 4 15216 // CHECK13-NEXT: [[TMP12:%.*]] = load i64, i64* [[K]], align 8 15217 // CHECK13-NEXT: store i64 [[TMP12]], i64* [[DOTLINEAR_START]], align 8 15218 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND9:%.*]] 15219 // CHECK13: omp.inner.for.cond9: 15220 // CHECK13-NEXT: [[TMP13:%.*]] = load 
// CHECK13-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB5]], align 4, !llvm.access.group !6
// CHECK13-NEXT: [[CMP10:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK13-NEXT: br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END19:%.*]]
// CHECK13: omp.inner.for.body11:
// CHECK13-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
// CHECK13-NEXT: [[MUL12:%.*]] = mul nsw i32 [[TMP15]], 1
// CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[MUL12]]
// CHECK13-NEXT: store i32 [[SUB]], i32* [[I7]], align 4, !llvm.access.group !6
// CHECK13-NEXT: [[TMP16:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !6
// CHECK13-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
// CHECK13-NEXT: [[MUL13:%.*]] = mul nsw i32 [[TMP17]], 3
// CHECK13-NEXT: [[CONV:%.*]] = sext i32 [[MUL13]] to i64
// CHECK13-NEXT: [[ADD14:%.*]] = add nsw i64 [[TMP16]], [[CONV]]
// CHECK13-NEXT: store i64 [[ADD14]], i64* [[K8]], align 8, !llvm.access.group !6
// CHECK13-NEXT: [[TMP18:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !6
// CHECK13-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP18]], 1
// CHECK13-NEXT: store i32 [[ADD15]], i32* [[A]], align 4, !llvm.access.group !6
// CHECK13-NEXT: br label [[OMP_BODY_CONTINUE16:%.*]]
// CHECK13: omp.body.continue16:
// CHECK13-NEXT: br label [[OMP_INNER_FOR_INC17:%.*]]
// CHECK13: omp.inner.for.inc17:
// CHECK13-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
// CHECK13-NEXT: [[ADD18:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK13-NEXT: store i32 [[ADD18]], i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP7:![0-9]+]]
// CHECK13: omp.inner.for.end19:
// CHECK13-NEXT: store i32 1, i32* [[I7]], align 4
// CHECK13-NEXT: [[TMP20:%.*]] = load i64, i64* [[K8]], align 8
// CHECK13-NEXT: store i64 [[TMP20]], i64* [[K]], align 8
// CHECK13-NEXT: store i32 12, i32* [[LIN]], align 4
// CHECK13-NEXT: store i64 0, i64* [[DOTOMP_LB21]], align 8
// CHECK13-NEXT: store i64 3, i64* [[DOTOMP_UB22]], align 8
// CHECK13-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTOMP_LB21]], align 8
// CHECK13-NEXT: store i64 [[TMP21]], i64* [[DOTOMP_IV23]], align 8
// CHECK13-NEXT: [[TMP22:%.*]] = load i32, i32* [[LIN]], align 4
// CHECK13-NEXT: store i32 [[TMP22]], i32* [[DOTLINEAR_START24]], align 4
// CHECK13-NEXT: [[TMP23:%.*]] = load i32, i32* [[A]], align 4
// CHECK13-NEXT: store i32 [[TMP23]], i32* [[DOTLINEAR_START25]], align 4
// CHECK13-NEXT: [[CALL26:%.*]] = call i64 @_Z7get_valv()
// CHECK13-NEXT: store i64 [[CALL26]], i64* [[DOTLINEAR_STEP]], align 8
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND29:%.*]]
// CHECK13: omp.inner.for.cond29:
// CHECK13-NEXT: [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK13-NEXT: [[TMP25:%.*]] = load i64, i64* [[DOTOMP_UB22]], align 8, !llvm.access.group !9
// CHECK13-NEXT: [[CMP30:%.*]] = icmp ule i64 [[TMP24]], [[TMP25]]
// CHECK13-NEXT: br i1 [[CMP30]], label [[OMP_INNER_FOR_BODY31:%.*]], label [[OMP_INNER_FOR_END48:%.*]]
// CHECK13: omp.inner.for.body31:
// CHECK13-NEXT: [[TMP26:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK13-NEXT: [[MUL32:%.*]] = mul i64 [[TMP26]], 400
// CHECK13-NEXT: [[SUB33:%.*]] = sub i64 2000, [[MUL32]]
// CHECK13-NEXT: store i64 [[SUB33]], i64* [[IT]], align 8, !llvm.access.group !9
// CHECK13-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTLINEAR_START24]], align 4, !llvm.access.group !9
// CHECK13-NEXT: [[CONV34:%.*]] = sext i32 [[TMP27]] to i64
// CHECK13-NEXT: [[TMP28:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK13-NEXT: [[TMP29:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !9
// CHECK13-NEXT: [[MUL35:%.*]] = mul i64 [[TMP28]], [[TMP29]]
// CHECK13-NEXT: [[ADD36:%.*]] = add i64 [[CONV34]], [[MUL35]]
// CHECK13-NEXT: [[CONV37:%.*]] = trunc i64 [[ADD36]] to i32
// CHECK13-NEXT: store i32 [[CONV37]], i32* [[LIN27]], align 4, !llvm.access.group !9
// CHECK13-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTLINEAR_START25]], align 4, !llvm.access.group !9
// CHECK13-NEXT: [[CONV38:%.*]] = sext i32 [[TMP30]] to i64
// CHECK13-NEXT: [[TMP31:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK13-NEXT: [[TMP32:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !9
// CHECK13-NEXT: [[MUL39:%.*]] = mul i64 [[TMP31]], [[TMP32]]
// CHECK13-NEXT: [[ADD40:%.*]] = add i64 [[CONV38]], [[MUL39]]
// CHECK13-NEXT: [[CONV41:%.*]] = trunc i64 [[ADD40]] to i32
// CHECK13-NEXT: store i32 [[CONV41]], i32* [[A28]], align 4, !llvm.access.group !9
// CHECK13-NEXT: [[TMP33:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !9
// CHECK13-NEXT: [[CONV42:%.*]] = sext i16 [[TMP33]] to i32
// CHECK13-NEXT: [[ADD43:%.*]] = add nsw i32 [[CONV42]], 1
// CHECK13-NEXT: [[CONV44:%.*]] = trunc i32 [[ADD43]] to i16
// CHECK13-NEXT: store i16 [[CONV44]], i16* [[AA]], align 2, !llvm.access.group !9
// CHECK13-NEXT: br label [[OMP_BODY_CONTINUE45:%.*]]
// CHECK13: omp.body.continue45:
// CHECK13-NEXT: br label [[OMP_INNER_FOR_INC46:%.*]]
// CHECK13: omp.inner.for.inc46:
// CHECK13-NEXT: [[TMP34:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK13-NEXT: [[ADD47:%.*]] = add i64 [[TMP34]], 1
// CHECK13-NEXT: store i64 [[ADD47]], i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP10:![0-9]+]]
// CHECK13: omp.inner.for.end48:
// CHECK13-NEXT: store i64 400, i64* [[IT]], align 8
// CHECK13-NEXT: [[TMP35:%.*]] = load i32, i32* [[LIN27]], align 4
// CHECK13-NEXT: store i32 [[TMP35]], i32* [[LIN]], align 4
// CHECK13-NEXT: [[TMP36:%.*]] = load i32, i32* [[A28]], align 4
// CHECK13-NEXT: store i32 [[TMP36]], i32* [[A]], align 4
// CHECK13-NEXT: store i32 0, i32* [[DOTOMP_LB50]], align 4
// CHECK13-NEXT: store i32 3, i32* [[DOTOMP_UB51]], align 4
// CHECK13-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTOMP_LB50]], align 4
// CHECK13-NEXT: store i32 [[TMP37]], i32* [[DOTOMP_IV52]], align 4
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND54:%.*]]
// CHECK13: omp.inner.for.cond54:
// CHECK13-NEXT: [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
// CHECK13-NEXT: [[TMP39:%.*]] = load i32, i32* [[DOTOMP_UB51]], align 4, !llvm.access.group !12
// CHECK13-NEXT: [[CMP55:%.*]] = icmp sle i32 [[TMP38]], [[TMP39]]
// CHECK13-NEXT: br i1 [[CMP55]], label [[OMP_INNER_FOR_BODY56:%.*]], label [[OMP_INNER_FOR_END67:%.*]]
// CHECK13: omp.inner.for.body56:
// CHECK13-NEXT: [[TMP40:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
// CHECK13-NEXT: [[MUL57:%.*]] = mul nsw i32 [[TMP40]], 4
// CHECK13-NEXT: [[ADD58:%.*]] = add nsw i32 6, [[MUL57]]
// CHECK13-NEXT: [[CONV59:%.*]] = trunc i32 [[ADD58]] to i16
// CHECK13-NEXT: store i16 [[CONV59]], i16* [[IT53]], align 2, !llvm.access.group !12
// CHECK13-NEXT: [[TMP41:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !12
// CHECK13-NEXT: [[ADD60:%.*]] = add nsw i32 [[TMP41]], 1
// CHECK13-NEXT: store i32 [[ADD60]], i32* [[A]], align 4, !llvm.access.group !12
// CHECK13-NEXT: [[TMP42:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !12
// CHECK13-NEXT: [[CONV61:%.*]] = sext i16 [[TMP42]] to i32
// CHECK13-NEXT: [[ADD62:%.*]] = add nsw i32 [[CONV61]], 1
// CHECK13-NEXT: [[CONV63:%.*]] = trunc i32 [[ADD62]] to i16
// CHECK13-NEXT: store i16 [[CONV63]], i16* [[AA]], align 2, !llvm.access.group !12
// CHECK13-NEXT: br label [[OMP_BODY_CONTINUE64:%.*]]
// CHECK13: omp.body.continue64:
// CHECK13-NEXT: br label [[OMP_INNER_FOR_INC65:%.*]]
// CHECK13: omp.inner.for.inc65:
// CHECK13-NEXT: [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
// CHECK13-NEXT: [[ADD66:%.*]] = add nsw i32 [[TMP43]], 1
// CHECK13-NEXT: store i32 [[ADD66]], i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND54]], !llvm.loop [[LOOP13:![0-9]+]]
// CHECK13: omp.inner.for.end67:
// CHECK13-NEXT: store i16 22, i16* [[IT53]], align 2
// CHECK13-NEXT: [[TMP44:%.*]] = load i32, i32* [[A]], align 4
// CHECK13-NEXT: store i32 [[TMP44]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK13-NEXT: store i32 0, i32* [[DOTOMP_LB69]], align 4
// CHECK13-NEXT: store i32 25, i32* [[DOTOMP_UB70]], align 4
// CHECK13-NEXT: [[TMP45:%.*]] = load i32, i32* [[DOTOMP_LB69]], align 4
// CHECK13-NEXT: store i32 [[TMP45]], i32* [[DOTOMP_IV71]], align 4
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND73:%.*]]
// CHECK13: omp.inner.for.cond73:
// CHECK13-NEXT: [[TMP46:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15
// CHECK13-NEXT: [[TMP47:%.*]] = load i32, i32* [[DOTOMP_UB70]], align 4, !llvm.access.group !15
// CHECK13-NEXT: [[CMP74:%.*]] = icmp sle i32 [[TMP46]], [[TMP47]]
// CHECK13-NEXT: br i1 [[CMP74]], label [[OMP_INNER_FOR_BODY75:%.*]], label [[OMP_INNER_FOR_END100:%.*]]
// CHECK13: omp.inner.for.body75:
// CHECK13-NEXT: [[TMP48:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15
// CHECK13-NEXT: [[MUL76:%.*]] = mul nsw i32 [[TMP48]], 1
// CHECK13-NEXT: [[SUB77:%.*]] = sub nsw i32 122, [[MUL76]]
// CHECK13-NEXT: [[CONV78:%.*]] = trunc i32 [[SUB77]] to i8
// CHECK13-NEXT: store i8 [[CONV78]], i8* [[IT72]], align 1, !llvm.access.group !15
// CHECK13-NEXT: [[TMP49:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !15
// CHECK13-NEXT: [[ADD79:%.*]] = add nsw i32 [[TMP49]], 1
// CHECK13-NEXT: store i32 [[ADD79]], i32* [[A]], align 4, !llvm.access.group !15
// CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i64 0, i64 2
// CHECK13-NEXT: [[TMP50:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !15
// CHECK13-NEXT: [[CONV80:%.*]] = fpext float [[TMP50]] to double
// CHECK13-NEXT: [[ADD81:%.*]] = fadd double [[CONV80]], 1.000000e+00
// CHECK13-NEXT: [[CONV82:%.*]] = fptrunc double [[ADD81]] to float
// CHECK13-NEXT: store float [[CONV82]], float* [[ARRAYIDX]], align 4, !llvm.access.group !15
// CHECK13-NEXT: [[ARRAYIDX83:%.*]] = getelementptr inbounds float, float* [[VLA]], i64 3
// CHECK13-NEXT: [[TMP51:%.*]] = load float, float* [[ARRAYIDX83]], align 4, !llvm.access.group !15
// CHECK13-NEXT: [[CONV84:%.*]] = fpext float [[TMP51]] to double
// CHECK13-NEXT: [[ADD85:%.*]] = fadd double [[CONV84]], 1.000000e+00
// CHECK13-NEXT: [[CONV86:%.*]] = fptrunc double [[ADD85]] to float
// CHECK13-NEXT: store float [[CONV86]], float* [[ARRAYIDX83]], align 4, !llvm.access.group !15
// CHECK13-NEXT: [[ARRAYIDX87:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i64 0, i64 1
// CHECK13-NEXT: [[ARRAYIDX88:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX87]], i64 0, i64 2
// CHECK13-NEXT: [[TMP52:%.*]] = load double, double* [[ARRAYIDX88]], align 8, !llvm.access.group !15
// CHECK13-NEXT: [[ADD89:%.*]] = fadd double [[TMP52]], 1.000000e+00
// CHECK13-NEXT: store double [[ADD89]], double* [[ARRAYIDX88]], align 8, !llvm.access.group !15
// CHECK13-NEXT: [[TMP53:%.*]] = mul nsw i64 1, [[TMP4]]
// CHECK13-NEXT: [[ARRAYIDX90:%.*]] = getelementptr inbounds double, double* [[VLA1]], i64 [[TMP53]]
// CHECK13-NEXT: [[ARRAYIDX91:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX90]], i64 3
// CHECK13-NEXT: [[TMP54:%.*]] = load double, double* [[ARRAYIDX91]], align 8, !llvm.access.group !15
// CHECK13-NEXT: [[ADD92:%.*]] = fadd double [[TMP54]], 1.000000e+00
// CHECK13-NEXT: store double [[ADD92]], double* [[ARRAYIDX91]], align 8, !llvm.access.group !15
// CHECK13-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
// CHECK13-NEXT: [[TMP55:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group !15
// CHECK13-NEXT: [[ADD93:%.*]] = add nsw i64 [[TMP55]], 1
// CHECK13-NEXT: store i64 [[ADD93]], i64* [[X]], align 8, !llvm.access.group !15
// CHECK13-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
// CHECK13-NEXT: [[TMP56:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group !15
// CHECK13-NEXT: [[CONV94:%.*]] = sext i8 [[TMP56]] to i32
// CHECK13-NEXT: [[ADD95:%.*]] = add nsw i32 [[CONV94]], 1
// CHECK13-NEXT: [[CONV96:%.*]] = trunc i32 [[ADD95]] to i8
// CHECK13-NEXT: store i8 [[CONV96]], i8* [[Y]], align 8, !llvm.access.group !15
// CHECK13-NEXT: br label [[OMP_BODY_CONTINUE97:%.*]]
// CHECK13: omp.body.continue97:
// CHECK13-NEXT: br label [[OMP_INNER_FOR_INC98:%.*]]
// CHECK13: omp.inner.for.inc98:
// CHECK13-NEXT: [[TMP57:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15
// CHECK13-NEXT: [[ADD99:%.*]] = add nsw i32 [[TMP57]], 1
// CHECK13-NEXT: store i32 [[ADD99]], i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND73]], !llvm.loop [[LOOP16:![0-9]+]]
// CHECK13: omp.inner.for.end100:
// CHECK13-NEXT: store i8 96, i8* [[IT72]], align 1
// CHECK13-NEXT: [[TMP58:%.*]] = load i32, i32* [[A]], align 4
// CHECK13-NEXT: [[TMP59:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK13-NEXT: call void @llvm.stackrestore(i8* [[TMP59]])
// CHECK13-NEXT: ret i32 [[TMP58]]
//
//
// CHECK13-LABEL: define {{[^@]+}}@_Z3bari
// CHECK13-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8
// CHECK13-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK13-NEXT: store i32 0, i32* [[A]], align 4
// CHECK13-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK13-NEXT: [[CALL:%.*]] = call signext i32 @_Z3fooi(i32 signext [[TMP0]])
// CHECK13-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
// CHECK13-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK13-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK13-NEXT: [[CALL1:%.*]] = call signext i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 8 dereferenceable(8) [[S]], i32 signext [[TMP2]])
// CHECK13-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK13-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
// CHECK13-NEXT: store i32 [[ADD2]], i32* [[A]], align 4
// CHECK13-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK13-NEXT: [[CALL3:%.*]] = call signext i32 @_ZL7fstatici(i32 signext [[TMP4]])
// CHECK13-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4
// CHECK13-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
// CHECK13-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK13-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK13-NEXT: [[CALL5:%.*]] = call signext i32 @_Z9ftemplateIiET_i(i32 signext [[TMP6]])
// CHECK13-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK13-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
// CHECK13-NEXT: store i32 [[ADD6]], i32* [[A]], align 4
// CHECK13-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK13-NEXT: ret i32 [[TMP8]]
//
//
// CHECK13-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
// CHECK13-SAME: (%struct.S1* nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 signext [[N:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK13-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[B:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK13-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
// CHECK13-NEXT: [[TMP:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK13-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK13-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK13-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK13-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK13-NEXT: store i32 [[ADD]], i32* [[B]], align 4
// CHECK13-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK13-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK13-NEXT: [[TMP3:%.*]] = call i8* @llvm.stacksave()
// CHECK13-NEXT: store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
// CHECK13-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]]
// CHECK13-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2
// CHECK13-NEXT: store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
// CHECK13-NEXT: [[TMP5:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK13-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 60
// CHECK13-NEXT: [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8
// CHECK13-NEXT: store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK13-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK13-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK13-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK13-NEXT: store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8
// CHECK13-NEXT: [[TMP7:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK13-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP7]] to i1
// CHECK13-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK13: omp_if.then:
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK13: omp.inner.for.cond:
// CHECK13-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK13-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !18
// CHECK13-NEXT: [[CMP2:%.*]] = icmp ule i64 [[TMP8]], [[TMP9]]
// CHECK13-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK13: omp.inner.for.body:
// CHECK13-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK13-NEXT: [[MUL:%.*]] = mul i64 [[TMP10]], 400
// CHECK13-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK13-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !18
// CHECK13-NEXT: [[TMP11:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group !18
// CHECK13-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP11]] to double
// CHECK13-NEXT: [[ADD3:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK13-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK13-NEXT: store double [[ADD3]], double* [[A]], align 8, !nontemporal !19, !llvm.access.group !18
// CHECK13-NEXT: [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK13-NEXT: [[TMP12:%.*]] = load double, double* [[A4]], align 8, !nontemporal !19, !llvm.access.group !18
// CHECK13-NEXT: [[INC:%.*]] = fadd double [[TMP12]], 1.000000e+00
// CHECK13-NEXT: store double [[INC]], double* [[A4]], align 8, !nontemporal !19, !llvm.access.group !18
// CHECK13-NEXT: [[CONV5:%.*]] = fptosi double [[INC]] to i16
// CHECK13-NEXT: [[TMP13:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP13]]
// CHECK13-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
// CHECK13-NEXT: store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2, !llvm.access.group !18
// CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK13: omp.body.continue:
// CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK13: omp.inner.for.inc:
// CHECK13-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK13-NEXT: [[ADD7:%.*]] = add i64 [[TMP14]], 1
// CHECK13-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]]
// CHECK13: omp.inner.for.end:
// CHECK13-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK13: omp_if.else:
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND8:%.*]]
// CHECK13: omp.inner.for.cond8:
// CHECK13-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK13-NEXT: [[TMP16:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK13-NEXT: [[CMP9:%.*]] = icmp ule i64 [[TMP15]], [[TMP16]]
// CHECK13-NEXT: br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY10:%.*]], label [[OMP_INNER_FOR_END24:%.*]]
// CHECK13: omp.inner.for.body10:
// CHECK13-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK13-NEXT: [[MUL11:%.*]] = mul i64 [[TMP17]], 400
// CHECK13-NEXT: [[SUB12:%.*]] = sub i64 2000, [[MUL11]]
// CHECK13-NEXT: store i64 [[SUB12]], i64* [[IT]], align 8
// CHECK13-NEXT: [[TMP18:%.*]] = load i32, i32* [[B]], align 4
// CHECK13-NEXT: [[CONV13:%.*]] = sitofp i32 [[TMP18]] to double
// CHECK13-NEXT: [[ADD14:%.*]] = fadd double [[CONV13]], 1.500000e+00
// CHECK13-NEXT: [[A15:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK13-NEXT: store double [[ADD14]], double* [[A15]], align 8
// CHECK13-NEXT: [[A16:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK13-NEXT: [[TMP19:%.*]] = load double, double* [[A16]], align 8
// CHECK13-NEXT: [[INC17:%.*]] = fadd double [[TMP19]], 1.000000e+00
// CHECK13-NEXT: store double [[INC17]], double* [[A16]], align 8
// CHECK13-NEXT: [[CONV18:%.*]] = fptosi double [[INC17]] to i16
// CHECK13-NEXT: [[TMP20:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK13-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP20]]
// CHECK13-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX19]], i64 1
// CHECK13-NEXT: store i16 [[CONV18]], i16* [[ARRAYIDX20]], align 2
// CHECK13-NEXT: br label [[OMP_BODY_CONTINUE21:%.*]]
// CHECK13: omp.body.continue21:
// CHECK13-NEXT: br label [[OMP_INNER_FOR_INC22:%.*]]
// CHECK13: omp.inner.for.inc22:
// CHECK13-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK13-NEXT: [[ADD23:%.*]] = add i64 [[TMP21]], 1
// CHECK13-NEXT: store i64 [[ADD23]], i64* [[DOTOMP_IV]], align 8
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP22:![0-9]+]]
// CHECK13: omp.inner.for.end24:
// CHECK13-NEXT: br label [[OMP_IF_END]]
// CHECK13: omp_if.end:
// CHECK13-NEXT: store i64 400, i64* [[IT]], align 8
// CHECK13-NEXT: [[TMP22:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK13-NEXT: [[ARRAYIDX25:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP22]]
// CHECK13-NEXT: [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX25]], i64 1
// CHECK13-NEXT: [[TMP23:%.*]] = load i16, i16* [[ARRAYIDX26]], align 2
// CHECK13-NEXT: [[CONV27:%.*]] = sext i16 [[TMP23]] to i32
// CHECK13-NEXT: [[TMP24:%.*]] = load i32, i32* [[B]], align 4
// CHECK13-NEXT: [[ADD28:%.*]] = add nsw i32 [[CONV27]], [[TMP24]]
// CHECK13-NEXT: [[TMP25:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK13-NEXT: call void @llvm.stackrestore(i8* [[TMP25]])
// CHECK13-NEXT: ret i32 [[ADD28]]
//
//
// CHECK13-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK13-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK13-NEXT: [[AAA:%.*]] = alloca i8, align 1
// CHECK13-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK13-NEXT: store i32 0, i32* [[A]], align 4
// CHECK13-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK13-NEXT: store i8 0, i8* [[AAA]], align 1
// CHECK13-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK13-NEXT: store i32 429496720, i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK13-NEXT: ret i32 [[TMP0]]
//
//
// CHECK13-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK13-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK13-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK13-NEXT: [[TMP:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[I:%.*]] = alloca i64, align 8
// CHECK13-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK13-NEXT: store i32 0, i32* [[A]], align 4
// CHECK13-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK13-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK13-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8
// CHECK13-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK13-NEXT: store i64 [[TMP0]], i64* [[DOTOMP_IV]], align 8
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK13: omp.inner.for.cond:
// CHECK13-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !24
// CHECK13-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !24
// CHECK13-NEXT: [[CMP:%.*]] = icmp sle i64 [[TMP1]], [[TMP2]]
// CHECK13-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK13: omp.inner.for.body:
// CHECK13-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !24
// CHECK13-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP3]], 3
// CHECK13-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
// CHECK13-NEXT: store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !24
// CHECK13-NEXT: [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !24
// CHECK13-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK13-NEXT: store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group !24
// CHECK13-NEXT: [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !24
// CHECK13-NEXT: [[CONV:%.*]] = sext i16 [[TMP5]] to i32
// CHECK13-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
// CHECK13-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
// CHECK13-NEXT: store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group !24
// CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i64 0, i64 2
// CHECK13-NEXT: [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !24
// CHECK13-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK13-NEXT: store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !24
// CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK13: omp.body.continue:
// CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK13: omp.inner.for.inc:
// CHECK13-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !24
// CHECK13-NEXT: [[ADD5:%.*]] = add nsw i64 [[TMP7]], 1
// CHECK13-NEXT: store i64 [[ADD5]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !24
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
// CHECK13: omp.inner.for.end:
// CHECK13-NEXT: store i64 11, i64* [[I]], align 8
// CHECK13-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK13-NEXT: ret i32 [[TMP8]]
//
//
// CHECK14-LABEL: define {{[^@]+}}@_Z7get_valv
// CHECK14-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: ret i64 0
//
//
// CHECK14-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK14-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK14-NEXT: [[B:%.*]] = alloca [10 x float], align 4
// CHECK14-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK14-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK14-NEXT: [[__VLA_EXPR1:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8
// CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[K:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[_TMP3:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_LB4:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_UB5:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_IV6:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTLINEAR_START:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[I7:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[K8:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[LIN:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[_TMP20:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTOMP_LB21:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTOMP_UB22:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTOMP_IV23:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTLINEAR_START24:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTLINEAR_START25:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[LIN27:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[A28:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[_TMP49:%.*]] = alloca i16, align 2
// CHECK14-NEXT: [[DOTOMP_LB50:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_UB51:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_IV52:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[IT53:%.*]] = alloca i16, align 2
// CHECK14-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[_TMP68:%.*]] = alloca i8, align 1
// CHECK14-NEXT: [[DOTOMP_LB69:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_UB70:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_IV71:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[IT72:%.*]] = alloca i8, align 1
// CHECK14-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK14-NEXT: store i32 0, i32* [[A]], align 4
// CHECK14-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK14-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK14-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
// CHECK14-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK14-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
// CHECK14-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4
// CHECK14-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
// CHECK14-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK14-NEXT: [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
// CHECK14-NEXT: [[TMP5:%.*]] = mul nuw i64 5, [[TMP4]]
// CHECK14-NEXT: [[VLA1:%.*]] = alloca double, i64 [[TMP5]], align 8
// CHECK14-NEXT: store i64 [[TMP4]], i64* [[__VLA_EXPR1]], align 8
// CHECK14-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK14-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK14-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK14: omp.inner.for.cond:
// CHECK14-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
// CHECK14-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !2
// CHECK14-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
// CHECK14-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK14: omp.inner.for.body:
// CHECK14-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
// CHECK14-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 5
// CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]]
// CHECK14-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !2
// CHECK14-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK14: omp.body.continue:
// CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK14: omp.inner.for.inc:
// CHECK14-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
// CHECK14-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK14-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
// CHECK14: omp.inner.for.end:
// CHECK14-NEXT: store i32 33, i32* [[I]], align 4
// CHECK14-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv()
// CHECK14-NEXT: store i64 [[CALL]], i64* [[K]], align 8
// CHECK14-NEXT: store i32 0, i32* [[DOTOMP_LB4]], align 4
// CHECK14-NEXT: store i32 8, i32* [[DOTOMP_UB5]], align 4
// CHECK14-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB4]], align 4
// CHECK14-NEXT: store i32 [[TMP11]], i32* [[DOTOMP_IV6]], align 4
// CHECK14-NEXT: [[TMP12:%.*]] = load i64, i64* [[K]], align 8
// CHECK14-NEXT: store i64 [[TMP12]], i64* [[DOTLINEAR_START]], align 8
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND9:%.*]]
// CHECK14: omp.inner.for.cond9:
// CHECK14-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
// CHECK14-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB5]], align 4, !llvm.access.group !6
// CHECK14-NEXT: [[CMP10:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK14-NEXT: br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END19:%.*]]
// CHECK14: omp.inner.for.body11:
// CHECK14-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
// CHECK14-NEXT: [[MUL12:%.*]] = mul nsw i32 [[TMP15]], 1
// CHECK14-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[MUL12]]
// CHECK14-NEXT: store i32 [[SUB]], i32* [[I7]], align 4, !llvm.access.group !6
// CHECK14-NEXT: [[TMP16:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !6
// CHECK14-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
// CHECK14-NEXT: [[MUL13:%.*]] = mul nsw i32 [[TMP17]], 3
// CHECK14-NEXT: [[CONV:%.*]] = sext i32 [[MUL13]] to i64
// CHECK14-NEXT: [[ADD14:%.*]] = add nsw i64 [[TMP16]], [[CONV]]
// CHECK14-NEXT: store i64 [[ADD14]], i64* [[K8]], align 8, !llvm.access.group !6
// CHECK14-NEXT: [[TMP18:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !6
// CHECK14-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP18]], 1
// CHECK14-NEXT: store i32 [[ADD15]], i32* [[A]], align 4, !llvm.access.group !6
// CHECK14-NEXT: br label [[OMP_BODY_CONTINUE16:%.*]]
// CHECK14: omp.body.continue16:
// CHECK14-NEXT: br label [[OMP_INNER_FOR_INC17:%.*]]
// CHECK14: omp.inner.for.inc17:
// CHECK14-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
// CHECK14-NEXT: [[ADD18:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK14-NEXT: store i32 [[ADD18]], i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP7:![0-9]+]]
// CHECK14: omp.inner.for.end19:
// CHECK14-NEXT: store i32 1, i32* [[I7]], align 4
// CHECK14-NEXT: [[TMP20:%.*]] = load i64, i64* [[K8]], align 8
// CHECK14-NEXT: store i64 [[TMP20]], i64* [[K]], align 8
// CHECK14-NEXT: store i32 12, i32* [[LIN]], align 4
// CHECK14-NEXT: store i64 0, i64* [[DOTOMP_LB21]], align 8
// CHECK14-NEXT: store i64 3, i64* [[DOTOMP_UB22]], align 8
// CHECK14-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTOMP_LB21]], align 8
// CHECK14-NEXT: store i64 [[TMP21]], i64* [[DOTOMP_IV23]], align 8
// CHECK14-NEXT: [[TMP22:%.*]] = load i32, i32* [[LIN]], align 4
// CHECK14-NEXT: store i32 [[TMP22]], i32* [[DOTLINEAR_START24]], align 4
// CHECK14-NEXT: [[TMP23:%.*]] = load i32, i32* [[A]], align 4
// CHECK14-NEXT: store i32 [[TMP23]], i32* [[DOTLINEAR_START25]], align 4
// CHECK14-NEXT: [[CALL26:%.*]] = call i64 @_Z7get_valv()
// CHECK14-NEXT: store i64 [[CALL26]], i64* [[DOTLINEAR_STEP]], align 8
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND29:%.*]]
// CHECK14: omp.inner.for.cond29:
// CHECK14-NEXT: [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK14-NEXT: [[TMP25:%.*]] = load i64, i64* [[DOTOMP_UB22]], align 8, !llvm.access.group !9
// CHECK14-NEXT: [[CMP30:%.*]] = icmp ule i64 [[TMP24]], [[TMP25]]
// CHECK14-NEXT: br i1 [[CMP30]], label [[OMP_INNER_FOR_BODY31:%.*]], label [[OMP_INNER_FOR_END48:%.*]]
// CHECK14: omp.inner.for.body31:
// CHECK14-NEXT: [[TMP26:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK14-NEXT: [[MUL32:%.*]] = mul i64 [[TMP26]], 400
// CHECK14-NEXT: [[SUB33:%.*]] = sub i64 2000, [[MUL32]]
// CHECK14-NEXT: store i64 [[SUB33]], i64* [[IT]], align 8, !llvm.access.group !9
// CHECK14-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTLINEAR_START24]], align 4, !llvm.access.group !9
// CHECK14-NEXT: [[CONV34:%.*]] = sext i32 [[TMP27]] to i64
// CHECK14-NEXT: [[TMP28:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK14-NEXT: [[TMP29:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !9
// CHECK14-NEXT: [[MUL35:%.*]] = mul i64 [[TMP28]], [[TMP29]]
// CHECK14-NEXT: [[ADD36:%.*]] = add i64 [[CONV34]], [[MUL35]]
// CHECK14-NEXT: [[CONV37:%.*]] = trunc i64 [[ADD36]] to i32
// CHECK14-NEXT: store i32 [[CONV37]], i32* [[LIN27]], align 4, !llvm.access.group !9
// CHECK14-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTLINEAR_START25]], align 4, !llvm.access.group !9
// CHECK14-NEXT: [[CONV38:%.*]] = sext i32 [[TMP30]] to i64
// CHECK14-NEXT: [[TMP31:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK14-NEXT: [[TMP32:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !9
// CHECK14-NEXT: [[MUL39:%.*]] = mul i64 [[TMP31]], [[TMP32]]
// CHECK14-NEXT: [[ADD40:%.*]] = add i64 [[CONV38]], [[MUL39]]
// CHECK14-NEXT: [[CONV41:%.*]] = trunc i64 [[ADD40]] to i32
// CHECK14-NEXT: store i32 [[CONV41]], i32* [[A28]], align 4, !llvm.access.group !9
// CHECK14-NEXT: [[TMP33:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !9
// CHECK14-NEXT: [[CONV42:%.*]] = sext i16 [[TMP33]] to i32
// CHECK14-NEXT: [[ADD43:%.*]] = add nsw i32 [[CONV42]], 1
// CHECK14-NEXT: [[CONV44:%.*]] = trunc i32 [[ADD43]] to i16
// CHECK14-NEXT: store i16 [[CONV44]], i16* [[AA]], align 2, !llvm.access.group !9
// CHECK14-NEXT: br label [[OMP_BODY_CONTINUE45:%.*]]
// CHECK14: omp.body.continue45:
// CHECK14-NEXT: br label [[OMP_INNER_FOR_INC46:%.*]]
// CHECK14: omp.inner.for.inc46:
// CHECK14-NEXT: [[TMP34:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK14-NEXT: [[ADD47:%.*]] = add i64 [[TMP34]], 1
// CHECK14-NEXT: store i64 [[ADD47]], i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP10:![0-9]+]]
// CHECK14: omp.inner.for.end48:
// CHECK14-NEXT: store i64 400, i64* [[IT]], align 8
// CHECK14-NEXT: [[TMP35:%.*]] = load i32, i32* [[LIN27]], align 4
// CHECK14-NEXT: store i32 [[TMP35]], i32* [[LIN]], align 4
// CHECK14-NEXT: [[TMP36:%.*]] = load i32, i32* [[A28]], align 4
// CHECK14-NEXT: store i32 [[TMP36]], i32* [[A]], align 4
// CHECK14-NEXT: store i32 0, i32* [[DOTOMP_LB50]], align 4
// CHECK14-NEXT: store i32 3, i32* [[DOTOMP_UB51]], align 4
// CHECK14-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTOMP_LB50]], align 4
// CHECK14-NEXT: store i32 [[TMP37]], i32* [[DOTOMP_IV52]], align 4
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND54:%.*]]
// CHECK14: omp.inner.for.cond54:
// CHECK14-NEXT: [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
// CHECK14-NEXT: [[TMP39:%.*]] = load i32, i32* [[DOTOMP_UB51]], align 4, !llvm.access.group !12
// CHECK14-NEXT: [[CMP55:%.*]] = icmp sle i32 [[TMP38]], [[TMP39]]
// CHECK14-NEXT: br i1 [[CMP55]], label [[OMP_INNER_FOR_BODY56:%.*]], label [[OMP_INNER_FOR_END67:%.*]]
// CHECK14: omp.inner.for.body56:
// CHECK14-NEXT: [[TMP40:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
// CHECK14-NEXT: [[MUL57:%.*]] = mul nsw i32 [[TMP40]], 4
// CHECK14-NEXT: [[ADD58:%.*]] = add nsw i32 6, [[MUL57]]
// CHECK14-NEXT: [[CONV59:%.*]] = trunc i32 [[ADD58]] to i16
// CHECK14-NEXT: store i16 [[CONV59]], i16* [[IT53]], align 2, !llvm.access.group !12
// CHECK14-NEXT: [[TMP41:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !12
// CHECK14-NEXT: [[ADD60:%.*]] = add nsw i32 [[TMP41]], 1
// CHECK14-NEXT: store i32 [[ADD60]], i32* [[A]], align 4, !llvm.access.group !12
// CHECK14-NEXT: [[TMP42:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !12
// CHECK14-NEXT: [[CONV61:%.*]] = sext i16 [[TMP42]] to i32
// CHECK14-NEXT: [[ADD62:%.*]] = add nsw i32 [[CONV61]], 1
// CHECK14-NEXT: [[CONV63:%.*]] = trunc i32 [[ADD62]] to i16
// CHECK14-NEXT: store i16 [[CONV63]], i16* [[AA]], align 2, !llvm.access.group !12
// CHECK14-NEXT: br label [[OMP_BODY_CONTINUE64:%.*]]
// CHECK14: omp.body.continue64:
// CHECK14-NEXT: br label [[OMP_INNER_FOR_INC65:%.*]]
// CHECK14: omp.inner.for.inc65:
// CHECK14-NEXT: [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
// CHECK14-NEXT: [[ADD66:%.*]] = add nsw i32 [[TMP43]], 1
// CHECK14-NEXT: store i32 [[ADD66]], i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND54]], !llvm.loop [[LOOP13:![0-9]+]]
// CHECK14: omp.inner.for.end67:
// CHECK14-NEXT: store i16 22, i16* [[IT53]], align 2
// CHECK14-NEXT: [[TMP44:%.*]] = load i32, i32* [[A]], align 4
// CHECK14-NEXT: store i32 [[TMP44]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK14-NEXT: store i32 0, i32* [[DOTOMP_LB69]], align 4
// CHECK14-NEXT: store i32 25, i32* [[DOTOMP_UB70]], align 4
// CHECK14-NEXT: [[TMP45:%.*]] = load i32, i32* [[DOTOMP_LB69]], align 4
// CHECK14-NEXT: store i32 [[TMP45]], i32* [[DOTOMP_IV71]], align 4
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND73:%.*]]
// CHECK14: omp.inner.for.cond73:
// CHECK14-NEXT: [[TMP46:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15
// CHECK14-NEXT: [[TMP47:%.*]] = load i32, i32* [[DOTOMP_UB70]], align 4, !llvm.access.group !15
// CHECK14-NEXT: [[CMP74:%.*]] = icmp sle i32 [[TMP46]], [[TMP47]]
// CHECK14-NEXT: br i1 [[CMP74]], label [[OMP_INNER_FOR_BODY75:%.*]], label [[OMP_INNER_FOR_END100:%.*]]
// CHECK14: omp.inner.for.body75:
// CHECK14-NEXT: [[TMP48:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15
// CHECK14-NEXT: [[MUL76:%.*]] = mul nsw i32 [[TMP48]], 1
// CHECK14-NEXT: [[SUB77:%.*]] = sub nsw i32 122, [[MUL76]]
// CHECK14-NEXT: [[CONV78:%.*]] = trunc i32 [[SUB77]] to i8
// CHECK14-NEXT: store i8 [[CONV78]], i8* [[IT72]], align 1, !llvm.access.group !15
// CHECK14-NEXT: [[TMP49:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !15
// CHECK14-NEXT: [[ADD79:%.*]] = add nsw i32 [[TMP49]], 1
// CHECK14-NEXT: store i32 [[ADD79]], i32* [[A]], align 4, !llvm.access.group !15
// CHECK14-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i64 0, i64 2
// CHECK14-NEXT: [[TMP50:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !15
// CHECK14-NEXT: [[CONV80:%.*]] = fpext float [[TMP50]] to double
// CHECK14-NEXT: [[ADD81:%.*]] = fadd double [[CONV80]], 1.000000e+00
// CHECK14-NEXT: [[CONV82:%.*]] = fptrunc double [[ADD81]] to float
// CHECK14-NEXT: store float [[CONV82]], float* [[ARRAYIDX]], align 4, !llvm.access.group !15
// CHECK14-NEXT: [[ARRAYIDX83:%.*]] = getelementptr inbounds float, float* [[VLA]], i64 3
// CHECK14-NEXT: [[TMP51:%.*]] = load float, float* [[ARRAYIDX83]], align 4, !llvm.access.group !15
// CHECK14-NEXT: [[CONV84:%.*]] = fpext float [[TMP51]] to double
// CHECK14-NEXT: [[ADD85:%.*]] = fadd double [[CONV84]], 1.000000e+00
// CHECK14-NEXT: [[CONV86:%.*]] = fptrunc double [[ADD85]] to float
// CHECK14-NEXT: store float [[CONV86]], float* [[ARRAYIDX83]], align 4, !llvm.access.group !15
// CHECK14-NEXT: [[ARRAYIDX87:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i64 0, i64 1
// CHECK14-NEXT: [[ARRAYIDX88:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX87]], i64 0, i64 2
// CHECK14-NEXT: [[TMP52:%.*]] = load double, double* [[ARRAYIDX88]], align 8, !llvm.access.group !15
// CHECK14-NEXT: [[ADD89:%.*]] = fadd double [[TMP52]], 1.000000e+00
// CHECK14-NEXT: store double [[ADD89]], double* [[ARRAYIDX88]], align 8, !llvm.access.group !15
// CHECK14-NEXT: [[TMP53:%.*]] = mul nsw i64 1, [[TMP4]]
// CHECK14-NEXT: [[ARRAYIDX90:%.*]] = getelementptr inbounds double, double* [[VLA1]], i64 [[TMP53]]
// CHECK14-NEXT: [[ARRAYIDX91:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX90]], i64 3
// CHECK14-NEXT: [[TMP54:%.*]] = load double, double* [[ARRAYIDX91]], align 8, !llvm.access.group !15
// CHECK14-NEXT: [[ADD92:%.*]] = fadd double [[TMP54]], 1.000000e+00
// CHECK14-NEXT: store double [[ADD92]], double* [[ARRAYIDX91]], align 8, !llvm.access.group !15
// CHECK14-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
// CHECK14-NEXT: [[TMP55:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group !15
// CHECK14-NEXT: [[ADD93:%.*]] = add nsw i64 [[TMP55]], 1
// CHECK14-NEXT: store i64 [[ADD93]], i64* [[X]], align 8, !llvm.access.group !15
// CHECK14-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
// CHECK14-NEXT: [[TMP56:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group !15
// CHECK14-NEXT: [[CONV94:%.*]] = sext i8 [[TMP56]] to i32
// CHECK14-NEXT: [[ADD95:%.*]] = add nsw i32 [[CONV94]], 1
// CHECK14-NEXT: [[CONV96:%.*]] = trunc i32 [[ADD95]] to i8
// CHECK14-NEXT: store i8 [[CONV96]], i8* [[Y]], align 8, !llvm.access.group !15
// CHECK14-NEXT: br label [[OMP_BODY_CONTINUE97:%.*]]
// CHECK14: omp.body.continue97:
// CHECK14-NEXT: br label [[OMP_INNER_FOR_INC98:%.*]]
// CHECK14: omp.inner.for.inc98:
// CHECK14-NEXT: [[TMP57:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15
// CHECK14-NEXT: [[ADD99:%.*]] = add nsw i32 [[TMP57]], 1
// CHECK14-NEXT: store i32 [[ADD99]], i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND73]], !llvm.loop [[LOOP16:![0-9]+]]
// CHECK14: omp.inner.for.end100:
// CHECK14-NEXT: store i8 96, i8* [[IT72]], align 1
// CHECK14-NEXT: [[TMP58:%.*]] = load i32, i32* [[A]], align 4
// CHECK14-NEXT: [[TMP59:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK14-NEXT: call void @llvm.stackrestore(i8* [[TMP59]])
// CHECK14-NEXT: ret i32 [[TMP58]]
//
//
// CHECK14-LABEL: define {{[^@]+}}@_Z3bari
// CHECK14-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8
// CHECK14-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK14-NEXT: store i32 0, i32* [[A]], align 4
// CHECK14-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK14-NEXT: [[CALL:%.*]] = call signext i32 @_Z3fooi(i32 signext [[TMP0]])
// CHECK14-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
// CHECK14-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK14-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK14-NEXT: [[CALL1:%.*]] = call signext i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 8 dereferenceable(8) [[S]], i32 signext [[TMP2]])
// CHECK14-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK14-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
// CHECK14-NEXT: store i32 [[ADD2]], i32* [[A]], align 4
// CHECK14-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK14-NEXT: [[CALL3:%.*]] = call signext i32 @_ZL7fstatici(i32 signext [[TMP4]])
// CHECK14-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4
// CHECK14-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
// CHECK14-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK14-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK14-NEXT: [[CALL5:%.*]] = call signext i32 @_Z9ftemplateIiET_i(i32 signext [[TMP6]])
// CHECK14-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK14-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
// CHECK14-NEXT: store i32 [[ADD6]], i32* [[A]], align 4
// CHECK14-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK14-NEXT: ret i32 [[TMP8]]
//
//
// CHECK14-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
// CHECK14-SAME: (%struct.S1* nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 signext [[N:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK14-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[B:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK14-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
// CHECK14-NEXT: [[TMP:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK14-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK14-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK14-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK14-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK14-NEXT: store i32 [[ADD]], i32* [[B]], align 4
// CHECK14-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK14-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK14-NEXT: [[TMP3:%.*]] = call i8* @llvm.stacksave()
// CHECK14-NEXT: store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
// CHECK14-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]]
// CHECK14-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2
// CHECK14-NEXT: store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
// CHECK14-NEXT: [[TMP5:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK14-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 60
// CHECK14-NEXT: [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8
// CHECK14-NEXT: store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK14-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK14-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK14-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK14-NEXT: store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8
// CHECK14-NEXT: [[TMP7:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK14-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP7]] to i1
// CHECK14-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK14: omp_if.then:
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK14: omp.inner.for.cond:
// CHECK14-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK14-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !18
// CHECK14-NEXT: [[CMP2:%.*]] = icmp ule i64 [[TMP8]], [[TMP9]]
// CHECK14-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK14: omp.inner.for.body:
// CHECK14-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK14-NEXT: [[MUL:%.*]] = mul i64 [[TMP10]], 400
// CHECK14-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK14-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !18
// CHECK14-NEXT: [[TMP11:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group !18
// CHECK14-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP11]] to double
// CHECK14-NEXT: [[ADD3:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK14-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK14-NEXT: store double [[ADD3]], double* [[A]], align 8, !nontemporal !19, !llvm.access.group !18
// CHECK14-NEXT: [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK14-NEXT: [[TMP12:%.*]] = load double, double* [[A4]], align 8, !nontemporal !19, !llvm.access.group !18
// CHECK14-NEXT: [[INC:%.*]] = fadd double [[TMP12]], 1.000000e+00
// CHECK14-NEXT: store double [[INC]], double* [[A4]], align 8, !nontemporal !19, !llvm.access.group !18
// CHECK14-NEXT: [[CONV5:%.*]] = fptosi double [[INC]] to i16
// CHECK14-NEXT: [[TMP13:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK14-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP13]]
// CHECK14-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
// CHECK14-NEXT: store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2, !llvm.access.group !18
// CHECK14-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK14: omp.body.continue:
// CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK14: omp.inner.for.inc:
// CHECK14-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK14-NEXT: [[ADD7:%.*]] = add i64 [[TMP14]], 1
// CHECK14-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]]
// CHECK14: omp.inner.for.end:
// CHECK14-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK14: omp_if.else:
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND8:%.*]]
// CHECK14: omp.inner.for.cond8:
// CHECK14-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK14-NEXT: [[TMP16:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK14-NEXT: [[CMP9:%.*]] = icmp ule i64 [[TMP15]], [[TMP16]]
// CHECK14-NEXT: br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY10:%.*]], label [[OMP_INNER_FOR_END24:%.*]]
// CHECK14: omp.inner.for.body10:
// CHECK14-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK14-NEXT: [[MUL11:%.*]] = mul i64 [[TMP17]], 400
// CHECK14-NEXT: [[SUB12:%.*]] = sub i64 2000, [[MUL11]]
// CHECK14-NEXT: store i64 [[SUB12]], i64* [[IT]], align 8
// CHECK14-NEXT: [[TMP18:%.*]] = load i32, i32* [[B]], align 4
// CHECK14-NEXT: [[CONV13:%.*]] = sitofp i32 [[TMP18]] to double
// CHECK14-NEXT: [[ADD14:%.*]] = fadd double [[CONV13]], 1.500000e+00
// CHECK14-NEXT: [[A15:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK14-NEXT: store double [[ADD14]], double* [[A15]], align 8
// CHECK14-NEXT: [[A16:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK14-NEXT: [[TMP19:%.*]] = load double, double* [[A16]], align 8
// CHECK14-NEXT: [[INC17:%.*]] = fadd double [[TMP19]], 1.000000e+00
// CHECK14-NEXT: store double [[INC17]], double* [[A16]], align 8
// CHECK14-NEXT: [[CONV18:%.*]] = fptosi double [[INC17]] to i16
// CHECK14-NEXT: [[TMP20:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK14-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP20]]
// CHECK14-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX19]], i64 1
// CHECK14-NEXT: store i16 [[CONV18]], i16* [[ARRAYIDX20]], align 2
// CHECK14-NEXT: br label [[OMP_BODY_CONTINUE21:%.*]]
// CHECK14: omp.body.continue21:
// CHECK14-NEXT: br label [[OMP_INNER_FOR_INC22:%.*]]
// CHECK14: omp.inner.for.inc22:
// CHECK14-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK14-NEXT: [[ADD23:%.*]] = add i64 [[TMP21]], 1
// CHECK14-NEXT: store i64 [[ADD23]], i64* [[DOTOMP_IV]], align 8
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP22:![0-9]+]]
// CHECK14: omp.inner.for.end24:
// CHECK14-NEXT: br label [[OMP_IF_END]]
// CHECK14: omp_if.end:
// CHECK14-NEXT: store i64 400, i64* [[IT]], align 8
// CHECK14-NEXT: [[TMP22:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK14-NEXT: [[ARRAYIDX25:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP22]]
// CHECK14-NEXT: [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX25]], i64 1
// CHECK14-NEXT: [[TMP23:%.*]] = load i16, i16* [[ARRAYIDX26]], align 2
// CHECK14-NEXT: [[CONV27:%.*]] = sext i16 [[TMP23]] to i32
// CHECK14-NEXT: [[TMP24:%.*]] = load i32, i32* [[B]], align 4
// CHECK14-NEXT: [[ADD28:%.*]] = add nsw i32 [[CONV27]], [[TMP24]]
// CHECK14-NEXT: [[TMP25:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK14-NEXT: call void @llvm.stackrestore(i8* [[TMP25]])
// CHECK14-NEXT: ret i32 [[ADD28]]
//
//
// CHECK14-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK14-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK14-NEXT: [[AAA:%.*]] = alloca i8, align 1
// CHECK14-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK14-NEXT: store i32 0, i32* [[A]], align 4
// CHECK14-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK14-NEXT: store i8 0, i8* [[AAA]], align 1
// CHECK14-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK14-NEXT: store i32 429496720, i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK14-NEXT: ret i32 [[TMP0]]
//
//
// CHECK14-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK14-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK14-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK14-NEXT: [[TMP:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[I:%.*]] = alloca i64, align 8
// CHECK14-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK14-NEXT: store i32 0, i32* [[A]], align 4
// CHECK14-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK14-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK14-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8
// CHECK14-NEXT:
[[TMP0:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 16132 // CHECK14-NEXT: store i64 [[TMP0]], i64* [[DOTOMP_IV]], align 8 16133 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 16134 // CHECK14: omp.inner.for.cond: 16135 // CHECK14-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !24 16136 // CHECK14-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !24 16137 // CHECK14-NEXT: [[CMP:%.*]] = icmp sle i64 [[TMP1]], [[TMP2]] 16138 // CHECK14-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 16139 // CHECK14: omp.inner.for.body: 16140 // CHECK14-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !24 16141 // CHECK14-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP3]], 3 16142 // CHECK14-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]] 16143 // CHECK14-NEXT: store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !24 16144 // CHECK14-NEXT: [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !24 16145 // CHECK14-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 16146 // CHECK14-NEXT: store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group !24 16147 // CHECK14-NEXT: [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !24 16148 // CHECK14-NEXT: [[CONV:%.*]] = sext i16 [[TMP5]] to i32 16149 // CHECK14-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1 16150 // CHECK14-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16 16151 // CHECK14-NEXT: store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group !24 16152 // CHECK14-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i64 0, i64 2 16153 // CHECK14-NEXT: [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !24 16154 // CHECK14-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1 16155 // CHECK14-NEXT: store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !24 16156 // CHECK14-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 16157 // CHECK14: omp.body.continue: 16158 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 16159 // CHECK14: omp.inner.for.inc: 16160 // CHECK14-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !24 16161 // CHECK14-NEXT: [[ADD5:%.*]] = add nsw i64 [[TMP7]], 1 16162 // CHECK14-NEXT: store i64 [[ADD5]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !24 16163 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]] 16164 // CHECK14: omp.inner.for.end: 16165 // CHECK14-NEXT: store i64 11, i64* [[I]], align 8 16166 // CHECK14-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4 16167 // CHECK14-NEXT: ret i32 [[TMP8]] 16168 // 16169 // 16170 // CHECK15-LABEL: define {{[^@]+}}@_Z7get_valv 16171 // CHECK15-SAME: () #[[ATTR0:[0-9]+]] { 16172 // CHECK15-NEXT: entry: 16173 // CHECK15-NEXT: ret i64 0 16174 // 16175 // 16176 // CHECK15-LABEL: define {{[^@]+}}@_Z3fooi 16177 // CHECK15-SAME: (i32 [[N:%.*]]) #[[ATTR0]] { 16178 // CHECK15-NEXT: entry: 16179 // CHECK15-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 16180 // CHECK15-NEXT: [[A:%.*]] = alloca i32, align 4 16181 // CHECK15-NEXT: [[AA:%.*]] = alloca i16, align 2 16182 // CHECK15-NEXT: [[B:%.*]] = alloca [10 x float], align 4 16183 // CHECK15-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4 16184 // CHECK15-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4 16185 // CHECK15-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8 16186 // CHECK15-NEXT: [[__VLA_EXPR1:%.*]] = alloca i32, align 4 16187 // CHECK15-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4 
16188 // CHECK15-NEXT: [[TMP:%.*]] = alloca i32, align 4 16189 // CHECK15-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 16190 // CHECK15-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 16191 // CHECK15-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 16192 // CHECK15-NEXT: [[I:%.*]] = alloca i32, align 4 16193 // CHECK15-NEXT: [[K:%.*]] = alloca i64, align 8 16194 // CHECK15-NEXT: [[_TMP3:%.*]] = alloca i32, align 4 16195 // CHECK15-NEXT: [[DOTOMP_LB4:%.*]] = alloca i32, align 4 16196 // CHECK15-NEXT: [[DOTOMP_UB5:%.*]] = alloca i32, align 4 16197 // CHECK15-NEXT: [[DOTOMP_IV6:%.*]] = alloca i32, align 4 16198 // CHECK15-NEXT: [[DOTLINEAR_START:%.*]] = alloca i64, align 8 16199 // CHECK15-NEXT: [[I7:%.*]] = alloca i32, align 4 16200 // CHECK15-NEXT: [[K8:%.*]] = alloca i64, align 8 16201 // CHECK15-NEXT: [[LIN:%.*]] = alloca i32, align 4 16202 // CHECK15-NEXT: [[_TMP20:%.*]] = alloca i64, align 4 16203 // CHECK15-NEXT: [[DOTOMP_LB21:%.*]] = alloca i64, align 8 16204 // CHECK15-NEXT: [[DOTOMP_UB22:%.*]] = alloca i64, align 8 16205 // CHECK15-NEXT: [[DOTOMP_IV23:%.*]] = alloca i64, align 8 16206 // CHECK15-NEXT: [[DOTLINEAR_START24:%.*]] = alloca i32, align 4 16207 // CHECK15-NEXT: [[DOTLINEAR_START25:%.*]] = alloca i32, align 4 16208 // CHECK15-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8 16209 // CHECK15-NEXT: [[IT:%.*]] = alloca i64, align 8 16210 // CHECK15-NEXT: [[LIN27:%.*]] = alloca i32, align 4 16211 // CHECK15-NEXT: [[A28:%.*]] = alloca i32, align 4 16212 // CHECK15-NEXT: [[_TMP49:%.*]] = alloca i16, align 2 16213 // CHECK15-NEXT: [[DOTOMP_LB50:%.*]] = alloca i32, align 4 16214 // CHECK15-NEXT: [[DOTOMP_UB51:%.*]] = alloca i32, align 4 16215 // CHECK15-NEXT: [[DOTOMP_IV52:%.*]] = alloca i32, align 4 16216 // CHECK15-NEXT: [[IT53:%.*]] = alloca i16, align 2 16217 // CHECK15-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 16218 // CHECK15-NEXT: [[_TMP68:%.*]] = alloca i8, align 1 16219 // CHECK15-NEXT: [[DOTOMP_LB69:%.*]] = alloca i32, align 4 16220 // CHECK15-NEXT: [[DOTOMP_UB70:%.*]] = alloca i32, align 4 16221 // CHECK15-NEXT: [[DOTOMP_IV71:%.*]] = alloca i32, align 4 16222 // CHECK15-NEXT: [[IT72:%.*]] = alloca i8, align 1 16223 // CHECK15-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 16224 // CHECK15-NEXT: store i32 0, i32* [[A]], align 4 16225 // CHECK15-NEXT: store i16 0, i16* [[AA]], align 2 16226 // CHECK15-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 16227 // CHECK15-NEXT: [[TMP1:%.*]] = call i8* @llvm.stacksave() 16228 // CHECK15-NEXT: store i8* [[TMP1]], i8** [[SAVED_STACK]], align 4 16229 // CHECK15-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP0]], align 4 16230 // CHECK15-NEXT: store i32 [[TMP0]], i32* [[__VLA_EXPR0]], align 4 16231 // CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 16232 // CHECK15-NEXT: [[TMP3:%.*]] = mul nuw i32 5, [[TMP2]] 16233 // CHECK15-NEXT: [[VLA1:%.*]] = alloca double, i32 [[TMP3]], align 8 16234 // CHECK15-NEXT: store i32 [[TMP2]], i32* [[__VLA_EXPR1]], align 4 16235 // CHECK15-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 16236 // CHECK15-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4 16237 // CHECK15-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 16238 // CHECK15-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 16239 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 16240 // CHECK15: omp.inner.for.cond: 16241 // CHECK15-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3 16242 // CHECK15-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, 
!llvm.access.group !3 16243 // CHECK15-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 16244 // CHECK15-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 16245 // CHECK15: omp.inner.for.body: 16246 // CHECK15-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3 16247 // CHECK15-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5 16248 // CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]] 16249 // CHECK15-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !3 16250 // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 16251 // CHECK15: omp.body.continue: 16252 // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 16253 // CHECK15: omp.inner.for.inc: 16254 // CHECK15-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3 16255 // CHECK15-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 16256 // CHECK15-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3 16257 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] 16258 // CHECK15: omp.inner.for.end: 16259 // CHECK15-NEXT: store i32 33, i32* [[I]], align 4 16260 // CHECK15-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv() 16261 // CHECK15-NEXT: store i64 [[CALL]], i64* [[K]], align 8 16262 // CHECK15-NEXT: store i32 0, i32* [[DOTOMP_LB4]], align 4 16263 // CHECK15-NEXT: store i32 8, i32* [[DOTOMP_UB5]], align 4 16264 // CHECK15-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB4]], align 4 16265 // CHECK15-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_IV6]], align 4 16266 // CHECK15-NEXT: [[TMP10:%.*]] = load i64, i64* [[K]], align 8 16267 // CHECK15-NEXT: store i64 [[TMP10]], i64* [[DOTLINEAR_START]], align 8 16268 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND9:%.*]] 16269 // CHECK15: omp.inner.for.cond9: 16270 // CHECK15-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7 16271 // CHECK15-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB5]], align 4, !llvm.access.group !7 16272 // CHECK15-NEXT: [[CMP10:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]] 16273 // CHECK15-NEXT: br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END19:%.*]] 16274 // CHECK15: omp.inner.for.body11: 16275 // CHECK15-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7 16276 // CHECK15-NEXT: [[MUL12:%.*]] = mul nsw i32 [[TMP13]], 1 16277 // CHECK15-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[MUL12]] 16278 // CHECK15-NEXT: store i32 [[SUB]], i32* [[I7]], align 4, !llvm.access.group !7 16279 // CHECK15-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !7 16280 // CHECK15-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7 16281 // CHECK15-NEXT: [[MUL13:%.*]] = mul nsw i32 [[TMP15]], 3 16282 // CHECK15-NEXT: [[CONV:%.*]] = sext i32 [[MUL13]] to i64 16283 // CHECK15-NEXT: [[ADD14:%.*]] = add nsw i64 [[TMP14]], [[CONV]] 16284 // CHECK15-NEXT: store i64 [[ADD14]], i64* [[K8]], align 8, !llvm.access.group !7 16285 // CHECK15-NEXT: [[TMP16:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !7 16286 // CHECK15-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP16]], 1 16287 // CHECK15-NEXT: store i32 [[ADD15]], i32* [[A]], align 4, !llvm.access.group !7 16288 // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE16:%.*]] 16289 // CHECK15: omp.body.continue16: 16290 // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC17:%.*]] 16291 // CHECK15: omp.inner.for.inc17: 16292 // CHECK15-NEXT: [[TMP17:%.*]] = load i32, i32* 
[[DOTOMP_IV6]], align 4, !llvm.access.group !7 16293 // CHECK15-NEXT: [[ADD18:%.*]] = add nsw i32 [[TMP17]], 1 16294 // CHECK15-NEXT: store i32 [[ADD18]], i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7 16295 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP8:![0-9]+]] 16296 // CHECK15: omp.inner.for.end19: 16297 // CHECK15-NEXT: store i32 1, i32* [[I7]], align 4 16298 // CHECK15-NEXT: [[TMP18:%.*]] = load i64, i64* [[K8]], align 8 16299 // CHECK15-NEXT: store i64 [[TMP18]], i64* [[K]], align 8 16300 // CHECK15-NEXT: store i32 12, i32* [[LIN]], align 4 16301 // CHECK15-NEXT: store i64 0, i64* [[DOTOMP_LB21]], align 8 16302 // CHECK15-NEXT: store i64 3, i64* [[DOTOMP_UB22]], align 8 16303 // CHECK15-NEXT: [[TMP19:%.*]] = load i64, i64* [[DOTOMP_LB21]], align 8 16304 // CHECK15-NEXT: store i64 [[TMP19]], i64* [[DOTOMP_IV23]], align 8 16305 // CHECK15-NEXT: [[TMP20:%.*]] = load i32, i32* [[LIN]], align 4 16306 // CHECK15-NEXT: store i32 [[TMP20]], i32* [[DOTLINEAR_START24]], align 4 16307 // CHECK15-NEXT: [[TMP21:%.*]] = load i32, i32* [[A]], align 4 16308 // CHECK15-NEXT: store i32 [[TMP21]], i32* [[DOTLINEAR_START25]], align 4 16309 // CHECK15-NEXT: [[CALL26:%.*]] = call i64 @_Z7get_valv() 16310 // CHECK15-NEXT: store i64 [[CALL26]], i64* [[DOTLINEAR_STEP]], align 8 16311 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND29:%.*]] 16312 // CHECK15: omp.inner.for.cond29: 16313 // CHECK15-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10 16314 // CHECK15-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTOMP_UB22]], align 8, !llvm.access.group !10 16315 // CHECK15-NEXT: [[CMP30:%.*]] = icmp ule i64 [[TMP22]], [[TMP23]] 16316 // CHECK15-NEXT: br i1 [[CMP30]], label [[OMP_INNER_FOR_BODY31:%.*]], label [[OMP_INNER_FOR_END48:%.*]] 16317 // CHECK15: omp.inner.for.body31: 16318 // CHECK15-NEXT: [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10 16319 // CHECK15-NEXT: [[MUL32:%.*]] = mul i64 [[TMP24]], 400 16320 // CHECK15-NEXT: [[SUB33:%.*]] = sub i64 2000, [[MUL32]] 16321 // CHECK15-NEXT: store i64 [[SUB33]], i64* [[IT]], align 8, !llvm.access.group !10 16322 // CHECK15-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTLINEAR_START24]], align 4, !llvm.access.group !10 16323 // CHECK15-NEXT: [[CONV34:%.*]] = sext i32 [[TMP25]] to i64 16324 // CHECK15-NEXT: [[TMP26:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10 16325 // CHECK15-NEXT: [[TMP27:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !10 16326 // CHECK15-NEXT: [[MUL35:%.*]] = mul i64 [[TMP26]], [[TMP27]] 16327 // CHECK15-NEXT: [[ADD36:%.*]] = add i64 [[CONV34]], [[MUL35]] 16328 // CHECK15-NEXT: [[CONV37:%.*]] = trunc i64 [[ADD36]] to i32 16329 // CHECK15-NEXT: store i32 [[CONV37]], i32* [[LIN27]], align 4, !llvm.access.group !10 16330 // CHECK15-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTLINEAR_START25]], align 4, !llvm.access.group !10 16331 // CHECK15-NEXT: [[CONV38:%.*]] = sext i32 [[TMP28]] to i64 16332 // CHECK15-NEXT: [[TMP29:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10 16333 // CHECK15-NEXT: [[TMP30:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !10 16334 // CHECK15-NEXT: [[MUL39:%.*]] = mul i64 [[TMP29]], [[TMP30]] 16335 // CHECK15-NEXT: [[ADD40:%.*]] = add i64 [[CONV38]], [[MUL39]] 16336 // CHECK15-NEXT: [[CONV41:%.*]] = trunc i64 [[ADD40]] to i32 16337 // CHECK15-NEXT: store i32 [[CONV41]], i32* [[A28]], align 4, !llvm.access.group !10 16338 // CHECK15-NEXT: [[TMP31:%.*]] = 
load i16, i16* [[AA]], align 2, !llvm.access.group !10 16339 // CHECK15-NEXT: [[CONV42:%.*]] = sext i16 [[TMP31]] to i32 16340 // CHECK15-NEXT: [[ADD43:%.*]] = add nsw i32 [[CONV42]], 1 16341 // CHECK15-NEXT: [[CONV44:%.*]] = trunc i32 [[ADD43]] to i16 16342 // CHECK15-NEXT: store i16 [[CONV44]], i16* [[AA]], align 2, !llvm.access.group !10 16343 // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE45:%.*]] 16344 // CHECK15: omp.body.continue45: 16345 // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC46:%.*]] 16346 // CHECK15: omp.inner.for.inc46: 16347 // CHECK15-NEXT: [[TMP32:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10 16348 // CHECK15-NEXT: [[ADD47:%.*]] = add i64 [[TMP32]], 1 16349 // CHECK15-NEXT: store i64 [[ADD47]], i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10 16350 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP11:![0-9]+]] 16351 // CHECK15: omp.inner.for.end48: 16352 // CHECK15-NEXT: store i64 400, i64* [[IT]], align 8 16353 // CHECK15-NEXT: [[TMP33:%.*]] = load i32, i32* [[LIN27]], align 4 16354 // CHECK15-NEXT: store i32 [[TMP33]], i32* [[LIN]], align 4 16355 // CHECK15-NEXT: [[TMP34:%.*]] = load i32, i32* [[A28]], align 4 16356 // CHECK15-NEXT: store i32 [[TMP34]], i32* [[A]], align 4 16357 // CHECK15-NEXT: store i32 0, i32* [[DOTOMP_LB50]], align 4 16358 // CHECK15-NEXT: store i32 3, i32* [[DOTOMP_UB51]], align 4 16359 // CHECK15-NEXT: [[TMP35:%.*]] = load i32, i32* [[DOTOMP_LB50]], align 4 16360 // CHECK15-NEXT: store i32 [[TMP35]], i32* [[DOTOMP_IV52]], align 4 16361 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND54:%.*]] 16362 // CHECK15: omp.inner.for.cond54: 16363 // CHECK15-NEXT: [[TMP36:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13 16364 // CHECK15-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTOMP_UB51]], align 4, !llvm.access.group !13 16365 // CHECK15-NEXT: [[CMP55:%.*]] = icmp sle i32 [[TMP36]], [[TMP37]] 16366 // CHECK15-NEXT: br i1 [[CMP55]], label [[OMP_INNER_FOR_BODY56:%.*]], label [[OMP_INNER_FOR_END67:%.*]] 16367 // CHECK15: omp.inner.for.body56: 16368 // CHECK15-NEXT: [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13 16369 // CHECK15-NEXT: [[MUL57:%.*]] = mul nsw i32 [[TMP38]], 4 16370 // CHECK15-NEXT: [[ADD58:%.*]] = add nsw i32 6, [[MUL57]] 16371 // CHECK15-NEXT: [[CONV59:%.*]] = trunc i32 [[ADD58]] to i16 16372 // CHECK15-NEXT: store i16 [[CONV59]], i16* [[IT53]], align 2, !llvm.access.group !13 16373 // CHECK15-NEXT: [[TMP39:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !13 16374 // CHECK15-NEXT: [[ADD60:%.*]] = add nsw i32 [[TMP39]], 1 16375 // CHECK15-NEXT: store i32 [[ADD60]], i32* [[A]], align 4, !llvm.access.group !13 16376 // CHECK15-NEXT: [[TMP40:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !13 16377 // CHECK15-NEXT: [[CONV61:%.*]] = sext i16 [[TMP40]] to i32 16378 // CHECK15-NEXT: [[ADD62:%.*]] = add nsw i32 [[CONV61]], 1 16379 // CHECK15-NEXT: [[CONV63:%.*]] = trunc i32 [[ADD62]] to i16 16380 // CHECK15-NEXT: store i16 [[CONV63]], i16* [[AA]], align 2, !llvm.access.group !13 16381 // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE64:%.*]] 16382 // CHECK15: omp.body.continue64: 16383 // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC65:%.*]] 16384 // CHECK15: omp.inner.for.inc65: 16385 // CHECK15-NEXT: [[TMP41:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13 16386 // CHECK15-NEXT: [[ADD66:%.*]] = add nsw i32 [[TMP41]], 1 16387 // CHECK15-NEXT: store i32 [[ADD66]], i32* [[DOTOMP_IV52]], align 4, !llvm.access.group 
!13 16388 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND54]], !llvm.loop [[LOOP14:![0-9]+]] 16389 // CHECK15: omp.inner.for.end67: 16390 // CHECK15-NEXT: store i16 22, i16* [[IT53]], align 2 16391 // CHECK15-NEXT: [[TMP42:%.*]] = load i32, i32* [[A]], align 4 16392 // CHECK15-NEXT: store i32 [[TMP42]], i32* [[DOTCAPTURE_EXPR_]], align 4 16393 // CHECK15-NEXT: store i32 0, i32* [[DOTOMP_LB69]], align 4 16394 // CHECK15-NEXT: store i32 25, i32* [[DOTOMP_UB70]], align 4 16395 // CHECK15-NEXT: [[TMP43:%.*]] = load i32, i32* [[DOTOMP_LB69]], align 4 16396 // CHECK15-NEXT: store i32 [[TMP43]], i32* [[DOTOMP_IV71]], align 4 16397 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND73:%.*]] 16398 // CHECK15: omp.inner.for.cond73: 16399 // CHECK15-NEXT: [[TMP44:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16 16400 // CHECK15-NEXT: [[TMP45:%.*]] = load i32, i32* [[DOTOMP_UB70]], align 4, !llvm.access.group !16 16401 // CHECK15-NEXT: [[CMP74:%.*]] = icmp sle i32 [[TMP44]], [[TMP45]] 16402 // CHECK15-NEXT: br i1 [[CMP74]], label [[OMP_INNER_FOR_BODY75:%.*]], label [[OMP_INNER_FOR_END100:%.*]] 16403 // CHECK15: omp.inner.for.body75: 16404 // CHECK15-NEXT: [[TMP46:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16 16405 // CHECK15-NEXT: [[MUL76:%.*]] = mul nsw i32 [[TMP46]], 1 16406 // CHECK15-NEXT: [[SUB77:%.*]] = sub nsw i32 122, [[MUL76]] 16407 // CHECK15-NEXT: [[CONV78:%.*]] = trunc i32 [[SUB77]] to i8 16408 // CHECK15-NEXT: store i8 [[CONV78]], i8* [[IT72]], align 1, !llvm.access.group !16 16409 // CHECK15-NEXT: [[TMP47:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !16 16410 // CHECK15-NEXT: [[ADD79:%.*]] = add nsw i32 [[TMP47]], 1 16411 // CHECK15-NEXT: store i32 [[ADD79]], i32* [[A]], align 4, !llvm.access.group !16 16412 // CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i32 0, i32 2 16413 // CHECK15-NEXT: [[TMP48:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !16 16414 // CHECK15-NEXT: [[CONV80:%.*]] = fpext float [[TMP48]] to double 16415 // CHECK15-NEXT: [[ADD81:%.*]] = fadd double [[CONV80]], 1.000000e+00 16416 // CHECK15-NEXT: [[CONV82:%.*]] = fptrunc double [[ADD81]] to float 16417 // CHECK15-NEXT: store float [[CONV82]], float* [[ARRAYIDX]], align 4, !llvm.access.group !16 16418 // CHECK15-NEXT: [[ARRAYIDX83:%.*]] = getelementptr inbounds float, float* [[VLA]], i32 3 16419 // CHECK15-NEXT: [[TMP49:%.*]] = load float, float* [[ARRAYIDX83]], align 4, !llvm.access.group !16 16420 // CHECK15-NEXT: [[CONV84:%.*]] = fpext float [[TMP49]] to double 16421 // CHECK15-NEXT: [[ADD85:%.*]] = fadd double [[CONV84]], 1.000000e+00 16422 // CHECK15-NEXT: [[CONV86:%.*]] = fptrunc double [[ADD85]] to float 16423 // CHECK15-NEXT: store float [[CONV86]], float* [[ARRAYIDX83]], align 4, !llvm.access.group !16 16424 // CHECK15-NEXT: [[ARRAYIDX87:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i32 0, i32 1 16425 // CHECK15-NEXT: [[ARRAYIDX88:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX87]], i32 0, i32 2 16426 // CHECK15-NEXT: [[TMP50:%.*]] = load double, double* [[ARRAYIDX88]], align 8, !llvm.access.group !16 16427 // CHECK15-NEXT: [[ADD89:%.*]] = fadd double [[TMP50]], 1.000000e+00 16428 // CHECK15-NEXT: store double [[ADD89]], double* [[ARRAYIDX88]], align 8, !llvm.access.group !16 16429 // CHECK15-NEXT: [[TMP51:%.*]] = mul nsw i32 1, [[TMP2]] 16430 // CHECK15-NEXT: [[ARRAYIDX90:%.*]] = getelementptr inbounds double, double* [[VLA1]], 
i32 [[TMP51]] 16431 // CHECK15-NEXT: [[ARRAYIDX91:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX90]], i32 3 16432 // CHECK15-NEXT: [[TMP52:%.*]] = load double, double* [[ARRAYIDX91]], align 8, !llvm.access.group !16 16433 // CHECK15-NEXT: [[ADD92:%.*]] = fadd double [[TMP52]], 1.000000e+00 16434 // CHECK15-NEXT: store double [[ADD92]], double* [[ARRAYIDX91]], align 8, !llvm.access.group !16 16435 // CHECK15-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0 16436 // CHECK15-NEXT: [[TMP53:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group !16 16437 // CHECK15-NEXT: [[ADD93:%.*]] = add nsw i64 [[TMP53]], 1 16438 // CHECK15-NEXT: store i64 [[ADD93]], i64* [[X]], align 4, !llvm.access.group !16 16439 // CHECK15-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1 16440 // CHECK15-NEXT: [[TMP54:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group !16 16441 // CHECK15-NEXT: [[CONV94:%.*]] = sext i8 [[TMP54]] to i32 16442 // CHECK15-NEXT: [[ADD95:%.*]] = add nsw i32 [[CONV94]], 1 16443 // CHECK15-NEXT: [[CONV96:%.*]] = trunc i32 [[ADD95]] to i8 16444 // CHECK15-NEXT: store i8 [[CONV96]], i8* [[Y]], align 4, !llvm.access.group !16 16445 // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE97:%.*]] 16446 // CHECK15: omp.body.continue97: 16447 // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC98:%.*]] 16448 // CHECK15: omp.inner.for.inc98: 16449 // CHECK15-NEXT: [[TMP55:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16 16450 // CHECK15-NEXT: [[ADD99:%.*]] = add nsw i32 [[TMP55]], 1 16451 // CHECK15-NEXT: store i32 [[ADD99]], i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16 16452 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND73]], !llvm.loop [[LOOP17:![0-9]+]] 16453 // CHECK15: omp.inner.for.end100: 16454 // CHECK15-NEXT: store i8 96, i8* [[IT72]], align 1 16455 // CHECK15-NEXT: [[TMP56:%.*]] = load i32, i32* [[A]], align 4 16456 // CHECK15-NEXT: [[TMP57:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 16457 // CHECK15-NEXT: call void @llvm.stackrestore(i8* [[TMP57]]) 16458 // CHECK15-NEXT: ret i32 [[TMP56]] 16459 // 16460 // 16461 // CHECK15-LABEL: define {{[^@]+}}@_Z3bari 16462 // CHECK15-SAME: (i32 [[N:%.*]]) #[[ATTR0]] { 16463 // CHECK15-NEXT: entry: 16464 // CHECK15-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 16465 // CHECK15-NEXT: [[A:%.*]] = alloca i32, align 4 16466 // CHECK15-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4 16467 // CHECK15-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 16468 // CHECK15-NEXT: store i32 0, i32* [[A]], align 4 16469 // CHECK15-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 16470 // CHECK15-NEXT: [[CALL:%.*]] = call i32 @_Z3fooi(i32 [[TMP0]]) 16471 // CHECK15-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4 16472 // CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] 16473 // CHECK15-NEXT: store i32 [[ADD]], i32* [[A]], align 4 16474 // CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 16475 // CHECK15-NEXT: [[CALL1:%.*]] = call i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 4 dereferenceable(8) [[S]], i32 [[TMP2]]) 16476 // CHECK15-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 16477 // CHECK15-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] 16478 // CHECK15-NEXT: store i32 [[ADD2]], i32* [[A]], align 4 16479 // CHECK15-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 16480 // CHECK15-NEXT: [[CALL3:%.*]] = call i32 @_ZL7fstatici(i32 [[TMP4]]) 16481 // CHECK15-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4 
16482 // CHECK15-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] 16483 // CHECK15-NEXT: store i32 [[ADD4]], i32* [[A]], align 4 16484 // CHECK15-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4 16485 // CHECK15-NEXT: [[CALL5:%.*]] = call i32 @_Z9ftemplateIiET_i(i32 [[TMP6]]) 16486 // CHECK15-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4 16487 // CHECK15-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]] 16488 // CHECK15-NEXT: store i32 [[ADD6]], i32* [[A]], align 4 16489 // CHECK15-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4 16490 // CHECK15-NEXT: ret i32 [[TMP8]] 16491 // 16492 // 16493 // CHECK15-LABEL: define {{[^@]+}}@_ZN2S12r1Ei 16494 // CHECK15-SAME: (%struct.S1* nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 [[N:%.*]]) #[[ATTR0]] comdat align 2 { 16495 // CHECK15-NEXT: entry: 16496 // CHECK15-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 16497 // CHECK15-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 16498 // CHECK15-NEXT: [[B:%.*]] = alloca i32, align 4 16499 // CHECK15-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4 16500 // CHECK15-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4 16501 // CHECK15-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1 16502 // CHECK15-NEXT: [[TMP:%.*]] = alloca i64, align 4 16503 // CHECK15-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 16504 // CHECK15-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 16505 // CHECK15-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 16506 // CHECK15-NEXT: [[IT:%.*]] = alloca i64, align 8 16507 // CHECK15-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 16508 // CHECK15-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 16509 // CHECK15-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 16510 // CHECK15-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 16511 // CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 16512 // CHECK15-NEXT: store i32 [[ADD]], i32* [[B]], align 4 16513 // CHECK15-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 16514 // CHECK15-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave() 16515 // CHECK15-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4 16516 // CHECK15-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]] 16517 // CHECK15-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2 16518 // CHECK15-NEXT: store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4 16519 // CHECK15-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 16520 // CHECK15-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 60 16521 // CHECK15-NEXT: [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8 16522 // CHECK15-NEXT: store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_]], align 1 16523 // CHECK15-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 16524 // CHECK15-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8 16525 // CHECK15-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 16526 // CHECK15-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8 16527 // CHECK15-NEXT: [[TMP6:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1 16528 // CHECK15-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP6]] to i1 16529 // CHECK15-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 16530 // CHECK15: omp_if.then: 16531 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 16532 // CHECK15: omp.inner.for.cond: 16533 // CHECK15-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19 16534 // CHECK15-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !19 16535 // CHECK15-NEXT: [[CMP2:%.*]] = icmp ule i64 [[TMP7]], 
[[TMP8]] 16536 // CHECK15-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 16537 // CHECK15: omp.inner.for.body: 16538 // CHECK15-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19 16539 // CHECK15-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400 16540 // CHECK15-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] 16541 // CHECK15-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !19 16542 // CHECK15-NEXT: [[TMP10:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group !19 16543 // CHECK15-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP10]] to double 16544 // CHECK15-NEXT: [[ADD3:%.*]] = fadd double [[CONV]], 1.500000e+00 16545 // CHECK15-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0 16546 // CHECK15-NEXT: store double [[ADD3]], double* [[A]], align 4, !nontemporal !20, !llvm.access.group !19 16547 // CHECK15-NEXT: [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 16548 // CHECK15-NEXT: [[TMP11:%.*]] = load double, double* [[A4]], align 4, !nontemporal !20, !llvm.access.group !19 16549 // CHECK15-NEXT: [[INC:%.*]] = fadd double [[TMP11]], 1.000000e+00 16550 // CHECK15-NEXT: store double [[INC]], double* [[A4]], align 4, !nontemporal !20, !llvm.access.group !19 16551 // CHECK15-NEXT: [[CONV5:%.*]] = fptosi double [[INC]] to i16 16552 // CHECK15-NEXT: [[TMP12:%.*]] = mul nsw i32 1, [[TMP1]] 16553 // CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP12]] 16554 // CHECK15-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1 16555 // CHECK15-NEXT: store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2, !llvm.access.group !19 16556 // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 16557 // CHECK15: omp.body.continue: 16558 // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 16559 // CHECK15: omp.inner.for.inc: 16560 // CHECK15-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19 16561 // CHECK15-NEXT: [[ADD7:%.*]] = add i64 [[TMP13]], 1 16562 // CHECK15-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19 16563 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]] 16564 // CHECK15: omp.inner.for.end: 16565 // CHECK15-NEXT: br label [[OMP_IF_END:%.*]] 16566 // CHECK15: omp_if.else: 16567 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND8:%.*]] 16568 // CHECK15: omp.inner.for.cond8: 16569 // CHECK15-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 16570 // CHECK15-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 16571 // CHECK15-NEXT: [[CMP9:%.*]] = icmp ule i64 [[TMP14]], [[TMP15]] 16572 // CHECK15-NEXT: br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY10:%.*]], label [[OMP_INNER_FOR_END24:%.*]] 16573 // CHECK15: omp.inner.for.body10: 16574 // CHECK15-NEXT: [[TMP16:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 16575 // CHECK15-NEXT: [[MUL11:%.*]] = mul i64 [[TMP16]], 400 16576 // CHECK15-NEXT: [[SUB12:%.*]] = sub i64 2000, [[MUL11]] 16577 // CHECK15-NEXT: store i64 [[SUB12]], i64* [[IT]], align 8 16578 // CHECK15-NEXT: [[TMP17:%.*]] = load i32, i32* [[B]], align 4 16579 // CHECK15-NEXT: [[CONV13:%.*]] = sitofp i32 [[TMP17]] to double 16580 // CHECK15-NEXT: [[ADD14:%.*]] = fadd double [[CONV13]], 1.500000e+00 16581 // CHECK15-NEXT: [[A15:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 16582 // CHECK15-NEXT: store double [[ADD14]], double* [[A15]], align 4 16583 // 
CHECK15-NEXT: [[A16:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 16584 // CHECK15-NEXT: [[TMP18:%.*]] = load double, double* [[A16]], align 4 16585 // CHECK15-NEXT: [[INC17:%.*]] = fadd double [[TMP18]], 1.000000e+00 16586 // CHECK15-NEXT: store double [[INC17]], double* [[A16]], align 4 16587 // CHECK15-NEXT: [[CONV18:%.*]] = fptosi double [[INC17]] to i16 16588 // CHECK15-NEXT: [[TMP19:%.*]] = mul nsw i32 1, [[TMP1]] 16589 // CHECK15-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP19]] 16590 // CHECK15-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX19]], i32 1 16591 // CHECK15-NEXT: store i16 [[CONV18]], i16* [[ARRAYIDX20]], align 2 16592 // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE21:%.*]] 16593 // CHECK15: omp.body.continue21: 16594 // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC22:%.*]] 16595 // CHECK15: omp.inner.for.inc22: 16596 // CHECK15-NEXT: [[TMP20:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 16597 // CHECK15-NEXT: [[ADD23:%.*]] = add i64 [[TMP20]], 1 16598 // CHECK15-NEXT: store i64 [[ADD23]], i64* [[DOTOMP_IV]], align 8 16599 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP23:![0-9]+]] 16600 // CHECK15: omp.inner.for.end24: 16601 // CHECK15-NEXT: br label [[OMP_IF_END]] 16602 // CHECK15: omp_if.end: 16603 // CHECK15-NEXT: store i64 400, i64* [[IT]], align 8 16604 // CHECK15-NEXT: [[TMP21:%.*]] = mul nsw i32 1, [[TMP1]] 16605 // CHECK15-NEXT: [[ARRAYIDX25:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP21]] 16606 // CHECK15-NEXT: [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX25]], i32 1 16607 // CHECK15-NEXT: [[TMP22:%.*]] = load i16, i16* [[ARRAYIDX26]], align 2 16608 // CHECK15-NEXT: [[CONV27:%.*]] = sext i16 [[TMP22]] to i32 16609 // CHECK15-NEXT: [[TMP23:%.*]] = load i32, i32* [[B]], align 4 16610 // CHECK15-NEXT: [[ADD28:%.*]] = add nsw i32 [[CONV27]], [[TMP23]] 16611 // CHECK15-NEXT: [[TMP24:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 16612 // CHECK15-NEXT: call void @llvm.stackrestore(i8* [[TMP24]]) 16613 // CHECK15-NEXT: ret i32 [[ADD28]] 16614 // 16615 // 16616 // CHECK15-LABEL: define {{[^@]+}}@_ZL7fstatici 16617 // CHECK15-SAME: (i32 [[N:%.*]]) #[[ATTR0]] { 16618 // CHECK15-NEXT: entry: 16619 // CHECK15-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 16620 // CHECK15-NEXT: [[A:%.*]] = alloca i32, align 4 16621 // CHECK15-NEXT: [[AA:%.*]] = alloca i16, align 2 16622 // CHECK15-NEXT: [[AAA:%.*]] = alloca i8, align 1 16623 // CHECK15-NEXT: [[B:%.*]] = alloca [10 x i32], align 4 16624 // CHECK15-NEXT: [[TMP:%.*]] = alloca i32, align 4 16625 // CHECK15-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 16626 // CHECK15-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 16627 // CHECK15-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 16628 // CHECK15-NEXT: store i32 0, i32* [[A]], align 4 16629 // CHECK15-NEXT: store i16 0, i16* [[AA]], align 2 16630 // CHECK15-NEXT: store i8 0, i8* [[AAA]], align 1 16631 // CHECK15-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 16632 // CHECK15-NEXT: store i32 429496720, i32* [[DOTOMP_UB]], align 4 16633 // CHECK15-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4 16634 // CHECK15-NEXT: ret i32 [[TMP0]] 16635 // 16636 // 16637 // CHECK15-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i 16638 // CHECK15-SAME: (i32 [[N:%.*]]) #[[ATTR0]] comdat { 16639 // CHECK15-NEXT: entry: 16640 // CHECK15-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 16641 // CHECK15-NEXT: [[A:%.*]] = alloca i32, align 4 16642 // CHECK15-NEXT: 
[[AA:%.*]] = alloca i16, align 2 16643 // CHECK15-NEXT: [[B:%.*]] = alloca [10 x i32], align 4 16644 // CHECK15-NEXT: [[TMP:%.*]] = alloca i64, align 4 16645 // CHECK15-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 16646 // CHECK15-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 16647 // CHECK15-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 16648 // CHECK15-NEXT: [[I:%.*]] = alloca i64, align 8 16649 // CHECK15-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 16650 // CHECK15-NEXT: store i32 0, i32* [[A]], align 4 16651 // CHECK15-NEXT: store i16 0, i16* [[AA]], align 2 16652 // CHECK15-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 16653 // CHECK15-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8 16654 // CHECK15-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 16655 // CHECK15-NEXT: store i64 [[TMP0]], i64* [[DOTOMP_IV]], align 8 16656 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 16657 // CHECK15: omp.inner.for.cond: 16658 // CHECK15-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !25 16659 // CHECK15-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !25 16660 // CHECK15-NEXT: [[CMP:%.*]] = icmp sle i64 [[TMP1]], [[TMP2]] 16661 // CHECK15-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 16662 // CHECK15: omp.inner.for.body: 16663 // CHECK15-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !25 16664 // CHECK15-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP3]], 3 16665 // CHECK15-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]] 16666 // CHECK15-NEXT: store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !25 16667 // CHECK15-NEXT: [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !25 16668 // CHECK15-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 16669 // CHECK15-NEXT: store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group !25 16670 // CHECK15-NEXT: [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !25 16671 // CHECK15-NEXT: [[CONV:%.*]] = sext i16 [[TMP5]] to i32 16672 // CHECK15-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1 16673 // CHECK15-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16 16674 // CHECK15-NEXT: store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group !25 16675 // CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i32 0, i32 2 16676 // CHECK15-NEXT: [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !25 16677 // CHECK15-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1 16678 // CHECK15-NEXT: store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !25 16679 // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 16680 // CHECK15: omp.body.continue: 16681 // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 16682 // CHECK15: omp.inner.for.inc: 16683 // CHECK15-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !25 16684 // CHECK15-NEXT: [[ADD5:%.*]] = add nsw i64 [[TMP7]], 1 16685 // CHECK15-NEXT: store i64 [[ADD5]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !25 16686 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]] 16687 // CHECK15: omp.inner.for.end: 16688 // CHECK15-NEXT: store i64 11, i64* [[I]], align 8 16689 // CHECK15-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4 16690 // CHECK15-NEXT: ret i32 [[TMP8]] 16691 // 16692 // 16693 // CHECK16-LABEL: define {{[^@]+}}@_Z7get_valv 16694 // CHECK16-SAME: () #[[ATTR0:[0-9]+]] { 16695 // CHECK16-NEXT: entry: 16696 // CHECK16-NEXT: ret 
i64 0 16697 // 16698 // 16699 // CHECK16-LABEL: define {{[^@]+}}@_Z3fooi 16700 // CHECK16-SAME: (i32 [[N:%.*]]) #[[ATTR0]] { 16701 // CHECK16-NEXT: entry: 16702 // CHECK16-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 16703 // CHECK16-NEXT: [[A:%.*]] = alloca i32, align 4 16704 // CHECK16-NEXT: [[AA:%.*]] = alloca i16, align 2 16705 // CHECK16-NEXT: [[B:%.*]] = alloca [10 x float], align 4 16706 // CHECK16-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4 16707 // CHECK16-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4 16708 // CHECK16-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8 16709 // CHECK16-NEXT: [[__VLA_EXPR1:%.*]] = alloca i32, align 4 16710 // CHECK16-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4 16711 // CHECK16-NEXT: [[TMP:%.*]] = alloca i32, align 4 16712 // CHECK16-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 16713 // CHECK16-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 16714 // CHECK16-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 16715 // CHECK16-NEXT: [[I:%.*]] = alloca i32, align 4 16716 // CHECK16-NEXT: [[K:%.*]] = alloca i64, align 8 16717 // CHECK16-NEXT: [[_TMP3:%.*]] = alloca i32, align 4 16718 // CHECK16-NEXT: [[DOTOMP_LB4:%.*]] = alloca i32, align 4 16719 // CHECK16-NEXT: [[DOTOMP_UB5:%.*]] = alloca i32, align 4 16720 // CHECK16-NEXT: [[DOTOMP_IV6:%.*]] = alloca i32, align 4 16721 // CHECK16-NEXT: [[DOTLINEAR_START:%.*]] = alloca i64, align 8 16722 // CHECK16-NEXT: [[I7:%.*]] = alloca i32, align 4 16723 // CHECK16-NEXT: [[K8:%.*]] = alloca i64, align 8 16724 // CHECK16-NEXT: [[LIN:%.*]] = alloca i32, align 4 16725 // CHECK16-NEXT: [[_TMP20:%.*]] = alloca i64, align 4 16726 // CHECK16-NEXT: [[DOTOMP_LB21:%.*]] = alloca i64, align 8 16727 // CHECK16-NEXT: [[DOTOMP_UB22:%.*]] = alloca i64, align 8 16728 // CHECK16-NEXT: [[DOTOMP_IV23:%.*]] = alloca i64, align 8 16729 // CHECK16-NEXT: [[DOTLINEAR_START24:%.*]] = alloca i32, align 4 16730 // CHECK16-NEXT: [[DOTLINEAR_START25:%.*]] = alloca i32, align 4 16731 // CHECK16-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8 16732 // CHECK16-NEXT: [[IT:%.*]] = alloca i64, align 8 16733 // CHECK16-NEXT: [[LIN27:%.*]] = alloca i32, align 4 16734 // CHECK16-NEXT: [[A28:%.*]] = alloca i32, align 4 16735 // CHECK16-NEXT: [[_TMP49:%.*]] = alloca i16, align 2 16736 // CHECK16-NEXT: [[DOTOMP_LB50:%.*]] = alloca i32, align 4 16737 // CHECK16-NEXT: [[DOTOMP_UB51:%.*]] = alloca i32, align 4 16738 // CHECK16-NEXT: [[DOTOMP_IV52:%.*]] = alloca i32, align 4 16739 // CHECK16-NEXT: [[IT53:%.*]] = alloca i16, align 2 16740 // CHECK16-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 16741 // CHECK16-NEXT: [[_TMP68:%.*]] = alloca i8, align 1 16742 // CHECK16-NEXT: [[DOTOMP_LB69:%.*]] = alloca i32, align 4 16743 // CHECK16-NEXT: [[DOTOMP_UB70:%.*]] = alloca i32, align 4 16744 // CHECK16-NEXT: [[DOTOMP_IV71:%.*]] = alloca i32, align 4 16745 // CHECK16-NEXT: [[IT72:%.*]] = alloca i8, align 1 16746 // CHECK16-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 16747 // CHECK16-NEXT: store i32 0, i32* [[A]], align 4 16748 // CHECK16-NEXT: store i16 0, i16* [[AA]], align 2 16749 // CHECK16-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 16750 // CHECK16-NEXT: [[TMP1:%.*]] = call i8* @llvm.stacksave() 16751 // CHECK16-NEXT: store i8* [[TMP1]], i8** [[SAVED_STACK]], align 4 16752 // CHECK16-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP0]], align 4 16753 // CHECK16-NEXT: store i32 [[TMP0]], i32* [[__VLA_EXPR0]], align 4 16754 // CHECK16-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 16755 // CHECK16-NEXT: [[TMP3:%.*]] = mul nuw 
i32 5, [[TMP2]] 16756 // CHECK16-NEXT: [[VLA1:%.*]] = alloca double, i32 [[TMP3]], align 8 16757 // CHECK16-NEXT: store i32 [[TMP2]], i32* [[__VLA_EXPR1]], align 4 16758 // CHECK16-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 16759 // CHECK16-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4 16760 // CHECK16-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 16761 // CHECK16-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 16762 // CHECK16-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 16763 // CHECK16: omp.inner.for.cond: 16764 // CHECK16-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3 16765 // CHECK16-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !3 16766 // CHECK16-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 16767 // CHECK16-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 16768 // CHECK16: omp.inner.for.body: 16769 // CHECK16-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3 16770 // CHECK16-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5 16771 // CHECK16-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]] 16772 // CHECK16-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !3 16773 // CHECK16-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 16774 // CHECK16: omp.body.continue: 16775 // CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 16776 // CHECK16: omp.inner.for.inc: 16777 // CHECK16-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3 16778 // CHECK16-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 16779 // CHECK16-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3 16780 // CHECK16-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] 16781 // CHECK16: omp.inner.for.end: 16782 // CHECK16-NEXT: store i32 33, i32* [[I]], align 4 16783 // CHECK16-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv() 16784 // CHECK16-NEXT: store i64 [[CALL]], i64* [[K]], align 8 16785 // CHECK16-NEXT: store i32 0, i32* [[DOTOMP_LB4]], align 4 16786 // CHECK16-NEXT: store i32 8, i32* [[DOTOMP_UB5]], align 4 16787 // CHECK16-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB4]], align 4 16788 // CHECK16-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_IV6]], align 4 16789 // CHECK16-NEXT: [[TMP10:%.*]] = load i64, i64* [[K]], align 8 16790 // CHECK16-NEXT: store i64 [[TMP10]], i64* [[DOTLINEAR_START]], align 8 16791 // CHECK16-NEXT: br label [[OMP_INNER_FOR_COND9:%.*]] 16792 // CHECK16: omp.inner.for.cond9: 16793 // CHECK16-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7 16794 // CHECK16-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB5]], align 4, !llvm.access.group !7 16795 // CHECK16-NEXT: [[CMP10:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]] 16796 // CHECK16-NEXT: br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END19:%.*]] 16797 // CHECK16: omp.inner.for.body11: 16798 // CHECK16-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7 16799 // CHECK16-NEXT: [[MUL12:%.*]] = mul nsw i32 [[TMP13]], 1 16800 // CHECK16-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[MUL12]] 16801 // CHECK16-NEXT: store i32 [[SUB]], i32* [[I7]], align 4, !llvm.access.group !7 16802 // CHECK16-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !7 16803 // CHECK16-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7 16804 // CHECK16-NEXT: [[MUL13:%.*]] = mul nsw i32 [[TMP15]], 3 16805 // 
CHECK16-NEXT: [[CONV:%.*]] = sext i32 [[MUL13]] to i64 16806 // CHECK16-NEXT: [[ADD14:%.*]] = add nsw i64 [[TMP14]], [[CONV]] 16807 // CHECK16-NEXT: store i64 [[ADD14]], i64* [[K8]], align 8, !llvm.access.group !7 16808 // CHECK16-NEXT: [[TMP16:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !7 16809 // CHECK16-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP16]], 1 16810 // CHECK16-NEXT: store i32 [[ADD15]], i32* [[A]], align 4, !llvm.access.group !7 16811 // CHECK16-NEXT: br label [[OMP_BODY_CONTINUE16:%.*]] 16812 // CHECK16: omp.body.continue16: 16813 // CHECK16-NEXT: br label [[OMP_INNER_FOR_INC17:%.*]] 16814 // CHECK16: omp.inner.for.inc17: 16815 // CHECK16-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7 16816 // CHECK16-NEXT: [[ADD18:%.*]] = add nsw i32 [[TMP17]], 1 16817 // CHECK16-NEXT: store i32 [[ADD18]], i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7 16818 // CHECK16-NEXT: br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP8:![0-9]+]] 16819 // CHECK16: omp.inner.for.end19: 16820 // CHECK16-NEXT: store i32 1, i32* [[I7]], align 4 16821 // CHECK16-NEXT: [[TMP18:%.*]] = load i64, i64* [[K8]], align 8 16822 // CHECK16-NEXT: store i64 [[TMP18]], i64* [[K]], align 8 16823 // CHECK16-NEXT: store i32 12, i32* [[LIN]], align 4 16824 // CHECK16-NEXT: store i64 0, i64* [[DOTOMP_LB21]], align 8 16825 // CHECK16-NEXT: store i64 3, i64* [[DOTOMP_UB22]], align 8 16826 // CHECK16-NEXT: [[TMP19:%.*]] = load i64, i64* [[DOTOMP_LB21]], align 8 16827 // CHECK16-NEXT: store i64 [[TMP19]], i64* [[DOTOMP_IV23]], align 8 16828 // CHECK16-NEXT: [[TMP20:%.*]] = load i32, i32* [[LIN]], align 4 16829 // CHECK16-NEXT: store i32 [[TMP20]], i32* [[DOTLINEAR_START24]], align 4 16830 // CHECK16-NEXT: [[TMP21:%.*]] = load i32, i32* [[A]], align 4 16831 // CHECK16-NEXT: store i32 [[TMP21]], i32* [[DOTLINEAR_START25]], align 4 16832 // CHECK16-NEXT: [[CALL26:%.*]] = call i64 @_Z7get_valv() 16833 // CHECK16-NEXT: store i64 [[CALL26]], i64* [[DOTLINEAR_STEP]], align 8 16834 // CHECK16-NEXT: br label [[OMP_INNER_FOR_COND29:%.*]] 16835 // CHECK16: omp.inner.for.cond29: 16836 // CHECK16-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10 16837 // CHECK16-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTOMP_UB22]], align 8, !llvm.access.group !10 16838 // CHECK16-NEXT: [[CMP30:%.*]] = icmp ule i64 [[TMP22]], [[TMP23]] 16839 // CHECK16-NEXT: br i1 [[CMP30]], label [[OMP_INNER_FOR_BODY31:%.*]], label [[OMP_INNER_FOR_END48:%.*]] 16840 // CHECK16: omp.inner.for.body31: 16841 // CHECK16-NEXT: [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10 16842 // CHECK16-NEXT: [[MUL32:%.*]] = mul i64 [[TMP24]], 400 16843 // CHECK16-NEXT: [[SUB33:%.*]] = sub i64 2000, [[MUL32]] 16844 // CHECK16-NEXT: store i64 [[SUB33]], i64* [[IT]], align 8, !llvm.access.group !10 16845 // CHECK16-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTLINEAR_START24]], align 4, !llvm.access.group !10 16846 // CHECK16-NEXT: [[CONV34:%.*]] = sext i32 [[TMP25]] to i64 16847 // CHECK16-NEXT: [[TMP26:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10 16848 // CHECK16-NEXT: [[TMP27:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !10 16849 // CHECK16-NEXT: [[MUL35:%.*]] = mul i64 [[TMP26]], [[TMP27]] 16850 // CHECK16-NEXT: [[ADD36:%.*]] = add i64 [[CONV34]], [[MUL35]] 16851 // CHECK16-NEXT: [[CONV37:%.*]] = trunc i64 [[ADD36]] to i32 16852 // CHECK16-NEXT: store i32 [[CONV37]], i32* [[LIN27]], align 4, !llvm.access.group !10 16853 // 
CHECK16-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTLINEAR_START25]], align 4, !llvm.access.group !10
// CHECK16-NEXT: [[CONV38:%.*]] = sext i32 [[TMP28]] to i64
// CHECK16-NEXT: [[TMP29:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
// CHECK16-NEXT: [[TMP30:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !10
// CHECK16-NEXT: [[MUL39:%.*]] = mul i64 [[TMP29]], [[TMP30]]
// CHECK16-NEXT: [[ADD40:%.*]] = add i64 [[CONV38]], [[MUL39]]
// CHECK16-NEXT: [[CONV41:%.*]] = trunc i64 [[ADD40]] to i32
// CHECK16-NEXT: store i32 [[CONV41]], i32* [[A28]], align 4, !llvm.access.group !10
// CHECK16-NEXT: [[TMP31:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !10
// CHECK16-NEXT: [[CONV42:%.*]] = sext i16 [[TMP31]] to i32
// CHECK16-NEXT: [[ADD43:%.*]] = add nsw i32 [[CONV42]], 1
// CHECK16-NEXT: [[CONV44:%.*]] = trunc i32 [[ADD43]] to i16
// CHECK16-NEXT: store i16 [[CONV44]], i16* [[AA]], align 2, !llvm.access.group !10
// CHECK16-NEXT: br label [[OMP_BODY_CONTINUE45:%.*]]
// CHECK16: omp.body.continue45:
// CHECK16-NEXT: br label [[OMP_INNER_FOR_INC46:%.*]]
// CHECK16: omp.inner.for.inc46:
// CHECK16-NEXT: [[TMP32:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
// CHECK16-NEXT: [[ADD47:%.*]] = add i64 [[TMP32]], 1
// CHECK16-NEXT: store i64 [[ADD47]], i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
// CHECK16-NEXT: br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP11:![0-9]+]]
// CHECK16: omp.inner.for.end48:
// CHECK16-NEXT: store i64 400, i64* [[IT]], align 8
// CHECK16-NEXT: [[TMP33:%.*]] = load i32, i32* [[LIN27]], align 4
// CHECK16-NEXT: store i32 [[TMP33]], i32* [[LIN]], align 4
// CHECK16-NEXT: [[TMP34:%.*]] = load i32, i32* [[A28]], align 4
// CHECK16-NEXT: store i32 [[TMP34]], i32* [[A]], align 4
// CHECK16-NEXT: store i32 0, i32* [[DOTOMP_LB50]], align 4
// CHECK16-NEXT: store i32 3, i32* [[DOTOMP_UB51]], align 4
// CHECK16-NEXT: [[TMP35:%.*]] = load i32, i32* [[DOTOMP_LB50]], align 4
// CHECK16-NEXT: store i32 [[TMP35]], i32* [[DOTOMP_IV52]], align 4
// CHECK16-NEXT: br label [[OMP_INNER_FOR_COND54:%.*]]
// CHECK16: omp.inner.for.cond54:
// CHECK16-NEXT: [[TMP36:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13
// CHECK16-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTOMP_UB51]], align 4, !llvm.access.group !13
// CHECK16-NEXT: [[CMP55:%.*]] = icmp sle i32 [[TMP36]], [[TMP37]]
// CHECK16-NEXT: br i1 [[CMP55]], label [[OMP_INNER_FOR_BODY56:%.*]], label [[OMP_INNER_FOR_END67:%.*]]
// CHECK16: omp.inner.for.body56:
// CHECK16-NEXT: [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13
// CHECK16-NEXT: [[MUL57:%.*]] = mul nsw i32 [[TMP38]], 4
// CHECK16-NEXT: [[ADD58:%.*]] = add nsw i32 6, [[MUL57]]
// CHECK16-NEXT: [[CONV59:%.*]] = trunc i32 [[ADD58]] to i16
// CHECK16-NEXT: store i16 [[CONV59]], i16* [[IT53]], align 2, !llvm.access.group !13
// CHECK16-NEXT: [[TMP39:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !13
// CHECK16-NEXT: [[ADD60:%.*]] = add nsw i32 [[TMP39]], 1
// CHECK16-NEXT: store i32 [[ADD60]], i32* [[A]], align 4, !llvm.access.group !13
// CHECK16-NEXT: [[TMP40:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !13
// CHECK16-NEXT: [[CONV61:%.*]] = sext i16 [[TMP40]] to i32
// CHECK16-NEXT: [[ADD62:%.*]] = add nsw i32 [[CONV61]], 1
// CHECK16-NEXT: [[CONV63:%.*]] = trunc i32 [[ADD62]] to i16
// CHECK16-NEXT: store i16 [[CONV63]], i16* [[AA]], align 2, !llvm.access.group !13
// CHECK16-NEXT: br label [[OMP_BODY_CONTINUE64:%.*]]
// CHECK16: omp.body.continue64:
// CHECK16-NEXT: br label [[OMP_INNER_FOR_INC65:%.*]]
// CHECK16: omp.inner.for.inc65:
// CHECK16-NEXT: [[TMP41:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13
// CHECK16-NEXT: [[ADD66:%.*]] = add nsw i32 [[TMP41]], 1
// CHECK16-NEXT: store i32 [[ADD66]], i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13
// CHECK16-NEXT: br label [[OMP_INNER_FOR_COND54]], !llvm.loop [[LOOP14:![0-9]+]]
// CHECK16: omp.inner.for.end67:
// CHECK16-NEXT: store i16 22, i16* [[IT53]], align 2
// CHECK16-NEXT: [[TMP42:%.*]] = load i32, i32* [[A]], align 4
// CHECK16-NEXT: store i32 [[TMP42]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK16-NEXT: store i32 0, i32* [[DOTOMP_LB69]], align 4
// CHECK16-NEXT: store i32 25, i32* [[DOTOMP_UB70]], align 4
// CHECK16-NEXT: [[TMP43:%.*]] = load i32, i32* [[DOTOMP_LB69]], align 4
// CHECK16-NEXT: store i32 [[TMP43]], i32* [[DOTOMP_IV71]], align 4
// CHECK16-NEXT: br label [[OMP_INNER_FOR_COND73:%.*]]
// CHECK16: omp.inner.for.cond73:
// CHECK16-NEXT: [[TMP44:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16
// CHECK16-NEXT: [[TMP45:%.*]] = load i32, i32* [[DOTOMP_UB70]], align 4, !llvm.access.group !16
// CHECK16-NEXT: [[CMP74:%.*]] = icmp sle i32 [[TMP44]], [[TMP45]]
// CHECK16-NEXT: br i1 [[CMP74]], label [[OMP_INNER_FOR_BODY75:%.*]], label [[OMP_INNER_FOR_END100:%.*]]
// CHECK16: omp.inner.for.body75:
// CHECK16-NEXT: [[TMP46:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16
// CHECK16-NEXT: [[MUL76:%.*]] = mul nsw i32 [[TMP46]], 1
// CHECK16-NEXT: [[SUB77:%.*]] = sub nsw i32 122, [[MUL76]]
// CHECK16-NEXT: [[CONV78:%.*]] = trunc i32 [[SUB77]] to i8
// CHECK16-NEXT: store i8 [[CONV78]], i8* [[IT72]], align 1, !llvm.access.group !16
// CHECK16-NEXT: [[TMP47:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !16
// CHECK16-NEXT: [[ADD79:%.*]] = add nsw i32 [[TMP47]], 1
// CHECK16-NEXT: store i32 [[ADD79]], i32* [[A]], align 4, !llvm.access.group !16
// CHECK16-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i32 0, i32 2
// CHECK16-NEXT: [[TMP48:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !16
// CHECK16-NEXT: [[CONV80:%.*]] = fpext float [[TMP48]] to double
// CHECK16-NEXT: [[ADD81:%.*]] = fadd double [[CONV80]], 1.000000e+00
// CHECK16-NEXT: [[CONV82:%.*]] = fptrunc double [[ADD81]] to float
// CHECK16-NEXT: store float [[CONV82]], float* [[ARRAYIDX]], align 4, !llvm.access.group !16
// CHECK16-NEXT: [[ARRAYIDX83:%.*]] = getelementptr inbounds float, float* [[VLA]], i32 3
// CHECK16-NEXT: [[TMP49:%.*]] = load float, float* [[ARRAYIDX83]], align 4, !llvm.access.group !16
// CHECK16-NEXT: [[CONV84:%.*]] = fpext float [[TMP49]] to double
// CHECK16-NEXT: [[ADD85:%.*]] = fadd double [[CONV84]], 1.000000e+00
// CHECK16-NEXT: [[CONV86:%.*]] = fptrunc double [[ADD85]] to float
// CHECK16-NEXT: store float [[CONV86]], float* [[ARRAYIDX83]], align 4, !llvm.access.group !16
// CHECK16-NEXT: [[ARRAYIDX87:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i32 0, i32 1
// CHECK16-NEXT: [[ARRAYIDX88:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX87]], i32 0, i32 2
// CHECK16-NEXT: [[TMP50:%.*]] = load double, double* [[ARRAYIDX88]], align 8, !llvm.access.group !16
// CHECK16-NEXT: [[ADD89:%.*]] = fadd double [[TMP50]], 1.000000e+00
// CHECK16-NEXT: store double [[ADD89]], double* [[ARRAYIDX88]], align 8, !llvm.access.group !16
// CHECK16-NEXT: [[TMP51:%.*]] = mul nsw i32 1, [[TMP2]]
// CHECK16-NEXT: [[ARRAYIDX90:%.*]] = getelementptr inbounds double, double* [[VLA1]], i32 [[TMP51]]
// CHECK16-NEXT: [[ARRAYIDX91:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX90]], i32 3
// CHECK16-NEXT: [[TMP52:%.*]] = load double, double* [[ARRAYIDX91]], align 8, !llvm.access.group !16
// CHECK16-NEXT: [[ADD92:%.*]] = fadd double [[TMP52]], 1.000000e+00
// CHECK16-NEXT: store double [[ADD92]], double* [[ARRAYIDX91]], align 8, !llvm.access.group !16
// CHECK16-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
// CHECK16-NEXT: [[TMP53:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group !16
// CHECK16-NEXT: [[ADD93:%.*]] = add nsw i64 [[TMP53]], 1
// CHECK16-NEXT: store i64 [[ADD93]], i64* [[X]], align 4, !llvm.access.group !16
// CHECK16-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
// CHECK16-NEXT: [[TMP54:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group !16
// CHECK16-NEXT: [[CONV94:%.*]] = sext i8 [[TMP54]] to i32
// CHECK16-NEXT: [[ADD95:%.*]] = add nsw i32 [[CONV94]], 1
// CHECK16-NEXT: [[CONV96:%.*]] = trunc i32 [[ADD95]] to i8
// CHECK16-NEXT: store i8 [[CONV96]], i8* [[Y]], align 4, !llvm.access.group !16
// CHECK16-NEXT: br label [[OMP_BODY_CONTINUE97:%.*]]
// CHECK16: omp.body.continue97:
// CHECK16-NEXT: br label [[OMP_INNER_FOR_INC98:%.*]]
// CHECK16: omp.inner.for.inc98:
// CHECK16-NEXT: [[TMP55:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16
// CHECK16-NEXT: [[ADD99:%.*]] = add nsw i32 [[TMP55]], 1
// CHECK16-NEXT: store i32 [[ADD99]], i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16
// CHECK16-NEXT: br label [[OMP_INNER_FOR_COND73]], !llvm.loop [[LOOP17:![0-9]+]]
// CHECK16: omp.inner.for.end100:
// CHECK16-NEXT: store i8 96, i8* [[IT72]], align 1
// CHECK16-NEXT: [[TMP56:%.*]] = load i32, i32* [[A]], align 4
// CHECK16-NEXT: [[TMP57:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK16-NEXT: call void @llvm.stackrestore(i8* [[TMP57]])
// CHECK16-NEXT: ret i32 [[TMP56]]
//
//
// CHECK16-LABEL: define {{[^@]+}}@_Z3bari
// CHECK16-SAME: (i32 [[N:%.*]]) #[[ATTR0]] {
// CHECK16-NEXT: entry:
// CHECK16-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4
// CHECK16-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK16-NEXT: store i32 0, i32* [[A]], align 4
// CHECK16-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK16-NEXT: [[CALL:%.*]] = call i32 @_Z3fooi(i32 [[TMP0]])
// CHECK16-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK16-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
// CHECK16-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK16-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK16-NEXT: [[CALL1:%.*]] = call i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 4 dereferenceable(8) [[S]], i32 [[TMP2]])
// CHECK16-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK16-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
// CHECK16-NEXT: store i32 [[ADD2]], i32* [[A]], align 4
// CHECK16-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK16-NEXT: [[CALL3:%.*]] = call i32 @_ZL7fstatici(i32 [[TMP4]])
// CHECK16-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4
// CHECK16-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
// CHECK16-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK16-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK16-NEXT: [[CALL5:%.*]] = call i32 @_Z9ftemplateIiET_i(i32 [[TMP6]])
// CHECK16-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK16-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
// CHECK16-NEXT: store i32 [[ADD6]], i32* [[A]], align 4
// CHECK16-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK16-NEXT: ret i32 [[TMP8]]
//
//
// CHECK16-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
// CHECK16-SAME: (%struct.S1* nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 [[N:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK16-NEXT: entry:
// CHECK16-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK16-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[B:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4
// CHECK16-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
// CHECK16-NEXT: [[TMP:%.*]] = alloca i64, align 4
// CHECK16-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK16-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK16-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK16-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK16-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK16-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK16-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK16-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK16-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK16-NEXT: store i32 [[ADD]], i32* [[B]], align 4
// CHECK16-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK16-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK16-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
// CHECK16-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]]
// CHECK16-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2
// CHECK16-NEXT: store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
// CHECK16-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK16-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 60
// CHECK16-NEXT: [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8
// CHECK16-NEXT: store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK16-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK16-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK16-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK16-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8
// CHECK16-NEXT: [[TMP6:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK16-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP6]] to i1
// CHECK16-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK16: omp_if.then:
// CHECK16-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK16: omp.inner.for.cond:
// CHECK16-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19
// CHECK16-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !19
// CHECK16-NEXT: [[CMP2:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]]
// CHECK16-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK16: omp.inner.for.body:
// CHECK16-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19
// CHECK16-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400
// CHECK16-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK16-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !19
// CHECK16-NEXT: [[TMP10:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group !19
// CHECK16-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP10]] to double
// CHECK16-NEXT: [[ADD3:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK16-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK16-NEXT: store double [[ADD3]], double* [[A]], align 4, !nontemporal !20, !llvm.access.group !19
// CHECK16-NEXT: [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK16-NEXT: [[TMP11:%.*]] = load double, double* [[A4]], align 4, !nontemporal !20, !llvm.access.group !19
// CHECK16-NEXT: [[INC:%.*]] = fadd double [[TMP11]], 1.000000e+00
// CHECK16-NEXT: store double [[INC]], double* [[A4]], align 4, !nontemporal !20, !llvm.access.group !19
// CHECK16-NEXT: [[CONV5:%.*]] = fptosi double [[INC]] to i16
// CHECK16-NEXT: [[TMP12:%.*]] = mul nsw i32 1, [[TMP1]]
// CHECK16-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP12]]
// CHECK16-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
// CHECK16-NEXT: store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2, !llvm.access.group !19
// CHECK16-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK16: omp.body.continue:
// CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK16: omp.inner.for.inc:
// CHECK16-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19
// CHECK16-NEXT: [[ADD7:%.*]] = add i64 [[TMP13]], 1
// CHECK16-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19
// CHECK16-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]]
// CHECK16: omp.inner.for.end:
// CHECK16-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK16: omp_if.else:
// CHECK16-NEXT: br label [[OMP_INNER_FOR_COND8:%.*]]
// CHECK16: omp.inner.for.cond8:
// CHECK16-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK16-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK16-NEXT: [[CMP9:%.*]] = icmp ule i64 [[TMP14]], [[TMP15]]
// CHECK16-NEXT: br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY10:%.*]], label [[OMP_INNER_FOR_END24:%.*]]
// CHECK16: omp.inner.for.body10:
// CHECK16-NEXT: [[TMP16:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK16-NEXT: [[MUL11:%.*]] = mul i64 [[TMP16]], 400
// CHECK16-NEXT: [[SUB12:%.*]] = sub i64 2000, [[MUL11]]
// CHECK16-NEXT: store i64 [[SUB12]], i64* [[IT]], align 8
// CHECK16-NEXT: [[TMP17:%.*]] = load i32, i32* [[B]], align 4
// CHECK16-NEXT: [[CONV13:%.*]] = sitofp i32 [[TMP17]] to double
// CHECK16-NEXT: [[ADD14:%.*]] = fadd double [[CONV13]], 1.500000e+00
// CHECK16-NEXT: [[A15:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK16-NEXT: store double [[ADD14]], double* [[A15]], align 4
// CHECK16-NEXT: [[A16:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK16-NEXT: [[TMP18:%.*]] = load double, double* [[A16]], align 4
// CHECK16-NEXT: [[INC17:%.*]] = fadd double [[TMP18]], 1.000000e+00
// CHECK16-NEXT: store double [[INC17]], double* [[A16]], align 4
// CHECK16-NEXT: [[CONV18:%.*]] = fptosi double [[INC17]] to i16
// CHECK16-NEXT: [[TMP19:%.*]] = mul nsw i32 1, [[TMP1]]
// CHECK16-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP19]]
// CHECK16-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX19]], i32 1
// CHECK16-NEXT: store i16 [[CONV18]], i16* [[ARRAYIDX20]], align 2
// CHECK16-NEXT: br label [[OMP_BODY_CONTINUE21:%.*]]
// CHECK16: omp.body.continue21:
// CHECK16-NEXT: br label [[OMP_INNER_FOR_INC22:%.*]]
// CHECK16: omp.inner.for.inc22:
// CHECK16-NEXT: [[TMP20:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK16-NEXT: [[ADD23:%.*]] = add i64 [[TMP20]], 1
// CHECK16-NEXT: store i64 [[ADD23]], i64* [[DOTOMP_IV]], align 8
// CHECK16-NEXT: br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP23:![0-9]+]]
// CHECK16: omp.inner.for.end24:
// CHECK16-NEXT: br label [[OMP_IF_END]]
// CHECK16: omp_if.end:
// CHECK16-NEXT: store i64 400, i64* [[IT]], align 8
// CHECK16-NEXT: [[TMP21:%.*]] = mul nsw i32 1, [[TMP1]]
// CHECK16-NEXT: [[ARRAYIDX25:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP21]]
// CHECK16-NEXT: [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX25]], i32 1
// CHECK16-NEXT: [[TMP22:%.*]] = load i16, i16* [[ARRAYIDX26]], align 2
// CHECK16-NEXT: [[CONV27:%.*]] = sext i16 [[TMP22]] to i32
// CHECK16-NEXT: [[TMP23:%.*]] = load i32, i32* [[B]], align 4
// CHECK16-NEXT: [[ADD28:%.*]] = add nsw i32 [[CONV27]], [[TMP23]]
// CHECK16-NEXT: [[TMP24:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK16-NEXT: call void @llvm.stackrestore(i8* [[TMP24]])
// CHECK16-NEXT: ret i32 [[ADD28]]
//
//
// CHECK16-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK16-SAME: (i32 [[N:%.*]]) #[[ATTR0]] {
// CHECK16-NEXT: entry:
// CHECK16-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK16-NEXT: [[AAA:%.*]] = alloca i8, align 1
// CHECK16-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK16-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK16-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK16-NEXT: store i32 0, i32* [[A]], align 4
// CHECK16-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK16-NEXT: store i8 0, i8* [[AAA]], align 1
// CHECK16-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK16-NEXT: store i32 429496720, i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK16-NEXT: ret i32 [[TMP0]]
//
//
// CHECK16-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK16-SAME: (i32 [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK16-NEXT: entry:
// CHECK16-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK16-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK16-NEXT: [[TMP:%.*]] = alloca i64, align 4
// CHECK16-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK16-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK16-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK16-NEXT: [[I:%.*]] = alloca i64, align 8
// CHECK16-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK16-NEXT: store i32 0, i32* [[A]], align 4
// CHECK16-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK16-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK16-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8
// CHECK16-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK16-NEXT: store i64 [[TMP0]], i64* [[DOTOMP_IV]], align 8
// CHECK16-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK16: omp.inner.for.cond:
// CHECK16-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !25
// CHECK16-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !25
// CHECK16-NEXT: [[CMP:%.*]] = icmp sle i64 [[TMP1]], [[TMP2]]
// CHECK16-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK16: omp.inner.for.body:
// CHECK16-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !25
// CHECK16-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP3]], 3
// CHECK16-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
// CHECK16-NEXT: store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !25
// CHECK16-NEXT: [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !25
// CHECK16-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK16-NEXT: store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group !25
// CHECK16-NEXT: [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !25
// CHECK16-NEXT: [[CONV:%.*]] = sext i16 [[TMP5]] to i32
// CHECK16-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
// CHECK16-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
// CHECK16-NEXT: store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group !25
// CHECK16-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i32 0, i32 2
// CHECK16-NEXT: [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !25
// CHECK16-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK16-NEXT: store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !25
// CHECK16-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK16: omp.body.continue:
// CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK16: omp.inner.for.inc:
// CHECK16-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !25
// CHECK16-NEXT: [[ADD5:%.*]] = add nsw i64 [[TMP7]], 1
// CHECK16-NEXT: store i64 [[ADD5]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !25
// CHECK16-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
// CHECK16: omp.inner.for.end:
// CHECK16-NEXT: store i64 11, i64* [[I]], align 8
// CHECK16-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK16-NEXT: ret i32 [[TMP8]]
//
//
// CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96
// CHECK17-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK17-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK17-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5
// CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK17: cond.true:
// CHECK17-NEXT: br label [[COND_END:%.*]]
// CHECK17: cond.false:
// CHECK17-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: br label [[COND_END]]
// CHECK17: cond.end:
// CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK17-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK17: omp.inner.for.cond:
// CHECK17-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK17-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !11
// CHECK17-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK17-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK17: omp.inner.for.body:
// CHECK17-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]]
// CHECK17-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !11
// CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK17: omp.body.continue:
// CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK17: omp.inner.for.inc:
// CHECK17-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK17-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK17-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]]
// CHECK17: omp.inner.for.end:
// CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK17: omp.loop.exit:
// CHECK17-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK17-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
// CHECK17-NEXT: br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK17: .omp.final.then:
// CHECK17-NEXT: store i32 33, i32* [[I]], align 4
// CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK17: .omp.final.done:
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108
// CHECK17-SAME: (i64 [[AA:%.*]], i64 [[LIN:%.*]], i64 [[A:%.*]]) #[[ATTR0]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[LIN_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[LIN_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK17-NEXT: store i64 [[LIN]], i64* [[LIN_ADDR]], align 8
// CHECK17-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32*
// CHECK17-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK17-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 8
// CHECK17-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK17-NEXT: store i16 [[TMP0]], i16* [[CONV3]], align 2
// CHECK17-NEXT: [[TMP1:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK17-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV1]], align 8
// CHECK17-NEXT: [[CONV4:%.*]] = bitcast i64* [[LIN_CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP2]], i32* [[CONV4]], align 4
// CHECK17-NEXT: [[TMP3:%.*]] = load i64, i64* [[LIN_CASTED]], align 8
// CHECK17-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV2]], align 8
// CHECK17-NEXT: [[CONV5:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP4]], i32* [[CONV5]], align 4
// CHECK17-NEXT: [[TMP5:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]])
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[AA:%.*]], i64 [[LIN:%.*]], i64 [[A:%.*]]) #[[ATTR1]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[LIN_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[TMP:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTLINEAR_START:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTLINEAR_START3:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[LIN4:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[A5:%.*]] = alloca i32, align 4
// CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK17-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK17-NEXT: store i64 [[LIN]], i64* [[LIN_ADDR]], align 8
// CHECK17-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32*
// CHECK17-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK17-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV1]], align 8
// CHECK17-NEXT: store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4
// CHECK17-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV2]], align 8
// CHECK17-NEXT: store i32 [[TMP1]], i32* [[DOTLINEAR_START3]], align 4
// CHECK17-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv() #[[ATTR5:[0-9]+]]
// CHECK17-NEXT: store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8
// CHECK17-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK17-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK17-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK17-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP3]])
// CHECK17-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK17-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK17-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3
// CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK17: cond.true:
// CHECK17-NEXT: br label [[COND_END:%.*]]
// CHECK17: cond.false:
// CHECK17-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK17-NEXT: br label [[COND_END]]
// CHECK17: cond.end:
// CHECK17-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK17-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK17-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK17-NEXT: store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK17: omp.inner.for.cond:
// CHECK17-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !17
// CHECK17-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !17
// CHECK17-NEXT: [[CMP6:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]]
// CHECK17-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK17: omp.inner.for.body:
// CHECK17-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !17
// CHECK17-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400
// CHECK17-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK17-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !17
// CHECK17-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4, !llvm.access.group !17
// CHECK17-NEXT: [[CONV7:%.*]] = sext i32 [[TMP10]] to i64
// CHECK17-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !17
// CHECK17-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !17
// CHECK17-NEXT: [[MUL8:%.*]] = mul i64 [[TMP11]], [[TMP12]]
// CHECK17-NEXT: [[ADD:%.*]] = add i64 [[CONV7]], [[MUL8]]
// CHECK17-NEXT: [[CONV9:%.*]] = trunc i64 [[ADD]] to i32
// CHECK17-NEXT: store i32 [[CONV9]], i32* [[LIN4]], align 4, !llvm.access.group !17
// CHECK17-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START3]], align 4, !llvm.access.group !17
// CHECK17-NEXT: [[CONV10:%.*]] = sext i32 [[TMP13]] to i64
// CHECK17-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !17
// CHECK17-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !17
// CHECK17-NEXT: [[MUL11:%.*]] = mul i64 [[TMP14]], [[TMP15]]
// CHECK17-NEXT: [[ADD12:%.*]] = add i64 [[CONV10]], [[MUL11]]
// CHECK17-NEXT: [[CONV13:%.*]] = trunc i64 [[ADD12]] to i32
// CHECK17-NEXT: store i32 [[CONV13]], i32* [[A5]], align 4, !llvm.access.group !17
// CHECK17-NEXT: [[TMP16:%.*]] = load i16, i16* [[CONV]], align 8, !llvm.access.group !17
// CHECK17-NEXT: [[CONV14:%.*]] = sext i16 [[TMP16]] to i32
// CHECK17-NEXT: [[ADD15:%.*]] = add nsw i32 [[CONV14]], 1
// CHECK17-NEXT: [[CONV16:%.*]] = trunc i32 [[ADD15]] to i16
// CHECK17-NEXT: store i16 [[CONV16]], i16* [[CONV]], align 8, !llvm.access.group !17
// CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK17: omp.body.continue:
// CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK17: omp.inner.for.inc:
// CHECK17-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !17
// CHECK17-NEXT: [[ADD17:%.*]] = add i64 [[TMP17]], 1
// CHECK17-NEXT: store i64 [[ADD17]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !17
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP18:![0-9]+]]
// CHECK17: omp.inner.for.end:
// CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK17: omp.loop.exit:
// CHECK17-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
// CHECK17-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
// CHECK17-NEXT: br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK17: .omp.final.then:
// CHECK17-NEXT: store i64 400, i64* [[IT]], align 8
// CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK17: .omp.final.done:
// CHECK17-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0
// CHECK17-NEXT: br i1 [[TMP21]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
// CHECK17: .omp.linear.pu:
// CHECK17-NEXT: [[TMP22:%.*]] = load i32, i32* [[LIN4]], align 4
// CHECK17-NEXT: store i32 [[TMP22]], i32* [[CONV1]], align 8
// CHECK17-NEXT: [[TMP23:%.*]] = load i32, i32* [[A5]], align 4
// CHECK17-NEXT: store i32 [[TMP23]], i32* [[CONV2]], align 8
// CHECK17-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]]
// CHECK17: .omp.linear.pu.done:
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@_Z7get_valv
// CHECK17-SAME: () #[[ATTR3:[0-9]+]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: ret i64 0
//
//
// CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116
// CHECK17-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]]) #[[ATTR0]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK17-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK17-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP0]], i32* [[CONV2]], align 4
// CHECK17-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK17-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK17-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK17-NEXT: store i16 [[TMP2]], i16* [[CONV3]], align 2
// CHECK17-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]])
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]]) #[[ATTR1]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[TMP:%.*]] = alloca i16, align 2
// CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[IT:%.*]] = alloca i16, align 2
// CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK17-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK17-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 3, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK17-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK17-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3
// CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK17: cond.true:
// CHECK17-NEXT: br label [[COND_END:%.*]]
// CHECK17: cond.false:
// CHECK17-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: br label [[COND_END]]
// CHECK17: cond.end:
// CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK17-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK17: omp.inner.for.cond:
// CHECK17-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
// CHECK17-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !20
// CHECK17-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK17-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK17: omp.inner.for.body:
// CHECK17-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
// CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4
// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]]
// CHECK17-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD]] to i16
// CHECK17-NEXT: store i16 [[CONV3]], i16* [[IT]], align 2, !llvm.access.group !20
// CHECK17-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !20
// CHECK17-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK17-NEXT: store i32 [[ADD4]], i32* [[CONV]], align 8, !llvm.access.group !20
// CHECK17-NEXT: [[TMP9:%.*]] = load i16, i16* [[CONV1]], align 8, !llvm.access.group !20
// CHECK17-NEXT: [[CONV5:%.*]] = sext i16 [[TMP9]] to i32
// CHECK17-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1
// CHECK17-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16
// CHECK17-NEXT: store i16 [[CONV7]], i16* [[CONV1]], align 8, !llvm.access.group !20
// CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK17: omp.body.continue:
// CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK17: omp.inner.for.inc:
// CHECK17-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
// CHECK17-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK17-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]]
// CHECK17: omp.inner.for.end:
// CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK17: omp.loop.exit:
// CHECK17-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK17-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
// CHECK17-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK17: .omp.final.then:
// CHECK17-NEXT: store i16 22, i16* [[IT]], align 2
// CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK17: .omp.final.done:
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140
// CHECK17-SAME: (i64 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
// CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8
// CHECK17-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
// CHECK17-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8
// CHECK17-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
// CHECK17-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK17-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
// CHECK17-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8
// CHECK17-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK17-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK17-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
// CHECK17-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8
// CHECK17-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
// CHECK17-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK17-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
// CHECK17-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
// CHECK17-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK17-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK17-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
// CHECK17-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
// CHECK17-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
// CHECK17-NEXT: [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK17-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: [[CONV6:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP8]], i32* [[CONV6]], align 4
// CHECK17-NEXT: [[TMP9:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK17-NEXT: [[TMP10:%.*]] = load i32, i32* [[CONV5]], align 8
// CHECK17-NEXT: [[CONV7:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP10]], i32* [[CONV7]], align 4
// CHECK17-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, [10 x float]*, i64, float*, [5 x [10 x double]]*, i64, i64, double*, %struct.TT*, i64)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP9]], [10 x float]* [[TMP0]], i64 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i64 [[TMP11]])
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
// CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8
// CHECK17-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
// CHECK17-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8
// CHECK17-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
// CHECK17-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[TMP:%.*]] = alloca i8, align 1
// CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[IT:%.*]] = alloca i8, align 1
// CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK17-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK17-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
// CHECK17-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8
// CHECK17-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK17-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK17-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
// CHECK17-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8
// CHECK17-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
// CHECK17-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK17-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
// CHECK17-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
// CHECK17-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK17-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK17-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
// CHECK17-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
// CHECK17-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
// CHECK17-NEXT: [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 25, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV5]], align 8
// CHECK17-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
// CHECK17-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]])
// CHECK17-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK17: omp.dispatch.cond:
// CHECK17-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25
// CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK17: cond.true:
// CHECK17-NEXT: br label [[COND_END:%.*]]
// CHECK17: cond.false:
// CHECK17-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: br label [[COND_END]]
// CHECK17: cond.end:
// CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK17-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
// CHECK17-NEXT: br i1 [[CMP6]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK17: omp.dispatch.body:
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK17: omp.inner.for.cond:
// CHECK17-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
// CHECK17-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !23
// CHECK17-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
// CHECK17-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK17: omp.inner.for.body:
// CHECK17-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
// CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
// CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 122, [[MUL]]
// CHECK17-NEXT: [[CONV8:%.*]] = trunc i32 [[SUB]] to i8
// CHECK17-NEXT: store i8 [[CONV8]], i8* [[IT]], align 1, !llvm.access.group !23
// CHECK17-NEXT: [[TMP19:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !23
// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK17-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8, !llvm.access.group !23
// CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 2
// CHECK17-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !23
// CHECK17-NEXT: [[CONV9:%.*]] = fpext float [[TMP20]] to double
// CHECK17-NEXT: [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00
// CHECK17-NEXT: [[CONV11:%.*]] = fptrunc double [[ADD10]] to float
// CHECK17-NEXT: store float [[CONV11]], float* [[ARRAYIDX]], align 4, !llvm.access.group !23
// CHECK17-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds float, float* [[TMP2]], i64 3
// CHECK17-NEXT: [[TMP21:%.*]] = load float, float* [[ARRAYIDX12]], align 4, !llvm.access.group !23
// CHECK17-NEXT: [[CONV13:%.*]] = fpext float [[TMP21]] to double
// CHECK17-NEXT: [[ADD14:%.*]] = fadd double [[CONV13]], 1.000000e+00
// CHECK17-NEXT: [[CONV15:%.*]] = fptrunc double [[ADD14]] to float
// CHECK17-NEXT: store float [[CONV15]], float* [[ARRAYIDX12]], align 4, !llvm.access.group !23
// CHECK17-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i64 0, i64 1
// CHECK17-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX16]], i64 0, i64 2
// CHECK17-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX17]], align 8, !llvm.access.group !23
// CHECK17-NEXT: [[ADD18:%.*]] = fadd double [[TMP22]], 1.000000e+00
// CHECK17-NEXT: store double [[ADD18]], double* [[ARRAYIDX17]], align 8, !llvm.access.group !23
// CHECK17-NEXT: [[TMP23:%.*]] = mul nsw i64 1, [[TMP5]]
// CHECK17-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds double, double* [[TMP6]], i64 [[TMP23]]
// CHECK17-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX19]], i64 3
// CHECK17-NEXT: [[TMP24:%.*]] = load double, double* [[ARRAYIDX20]], align 8, !llvm.access.group !23
// CHECK17-NEXT: [[ADD21:%.*]] = fadd double [[TMP24]], 1.000000e+00
// CHECK17-NEXT: store double [[ADD21]], double* [[ARRAYIDX20]], align 8, !llvm.access.group !23
// CHECK17-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
// CHECK17-NEXT: [[TMP25:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group !23
// CHECK17-NEXT: [[ADD22:%.*]] = add nsw i64 [[TMP25]], 1
// CHECK17-NEXT: store i64 [[ADD22]], i64* [[X]], align 8, !llvm.access.group !23
// CHECK17-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
// CHECK17-NEXT: [[TMP26:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group !23
// CHECK17-NEXT: [[CONV23:%.*]] = sext i8 [[TMP26]] to i32
// CHECK17-NEXT: [[ADD24:%.*]] = add nsw i32 [[CONV23]], 1
// CHECK17-NEXT: [[CONV25:%.*]] = trunc i32 [[ADD24]] to i8
// CHECK17-NEXT: store i8 [[CONV25]], i8* [[Y]], align 8, !llvm.access.group !23
// CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK17: omp.body.continue:
// CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK17: omp.inner.for.inc:
// CHECK17-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
// CHECK17-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP27]], 1
// CHECK17-NEXT: store i32 [[ADD26]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]]
// CHECK17: omp.inner.for.end:
// CHECK17-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK17: omp.dispatch.inc:
// CHECK17-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: [[ADD27:%.*]] = add nsw i32 [[TMP28]], [[TMP29]]
// CHECK17-NEXT: store i32 [[ADD27]], i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP30]], [[TMP31]]
// CHECK17-NEXT: store i32 [[ADD28]], i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK17: omp.dispatch.end:
// CHECK17-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]])
// CHECK17-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0
// CHECK17-NEXT: br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK17: .omp.final.then:
// CHECK17-NEXT: store i8 96, i8* [[IT]], align 1
// CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK17: .omp.final.done:
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195
// CHECK17-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK17-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[AAA_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK17-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK17-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
// CHECK17-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK17-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
// CHECK17-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK17-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: [[CONV3:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP1]], i32* [[CONV3]], align 4
// CHECK17-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK17-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK17-NEXT: [[CONV4:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK17-NEXT: store i16 [[TMP3]], i16* [[CONV4]], align 2
// CHECK17-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK17-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV2]], align 8
// CHECK17-NEXT: [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
// CHECK17-NEXT: store i8 [[TMP5]], i8* [[CONV5]], align 1
// CHECK17-NEXT: [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]])
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK17-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK17-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK17-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
// CHECK17-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK17-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
// CHECK17-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216
// CHECK17-SAME: (%struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR0]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK17-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 8
// CHECK17-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK17-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8
// CHECK17-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK17-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8
// CHECK17-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
// CHECK17-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK17-NEXT: [[TMP3:%.*]] =
load i16*, i16** [[C_ADDR]], align 8 17827 // CHECK17-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV]], align 8 17828 // CHECK17-NEXT: [[CONV3:%.*]] = bitcast i64* [[B_CASTED]] to i32* 17829 // CHECK17-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 17830 // CHECK17-NEXT: [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8 17831 // CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]]) 17832 // CHECK17-NEXT: ret void 17833 // 17834 // 17835 // CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..5 17836 // CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR1]] { 17837 // CHECK17-NEXT: entry: 17838 // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 17839 // CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 17840 // CHECK17-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 17841 // CHECK17-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8 17842 // CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 17843 // CHECK17-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8 17844 // CHECK17-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 8 17845 // CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 17846 // CHECK17-NEXT: [[TMP:%.*]] = alloca i64, align 8 17847 // CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 17848 // CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 17849 // CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 17850 // CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 17851 // CHECK17-NEXT: [[IT:%.*]] = alloca i64, align 8 17852 // CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 17853 // CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 17854 // CHECK17-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 17855 // CHECK17-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8 17856 // CHECK17-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 17857 // CHECK17-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8 17858 // CHECK17-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8 17859 // CHECK17-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 17860 // CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32* 17861 // CHECK17-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 17862 // CHECK17-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8 17863 // CHECK17-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8 17864 // CHECK17-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 17865 // CHECK17-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8 17866 // CHECK17-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 17867 // CHECK17-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 17868 // CHECK17-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 17869 // CHECK17-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4 17870 // CHECK17-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) 17871 // CHECK17-NEXT: 
[[TMP6:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 17872 // CHECK17-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP6]], 3 17873 // CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 17874 // CHECK17: cond.true: 17875 // CHECK17-NEXT: br label [[COND_END:%.*]] 17876 // CHECK17: cond.false: 17877 // CHECK17-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 17878 // CHECK17-NEXT: br label [[COND_END]] 17879 // CHECK17: cond.end: 17880 // CHECK17-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] 17881 // CHECK17-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 17882 // CHECK17-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 17883 // CHECK17-NEXT: store i64 [[TMP8]], i64* [[DOTOMP_IV]], align 8 17884 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 17885 // CHECK17: omp.inner.for.cond: 17886 // CHECK17-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !26 17887 // CHECK17-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !26 17888 // CHECK17-NEXT: [[CMP3:%.*]] = icmp ule i64 [[TMP9]], [[TMP10]] 17889 // CHECK17-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 17890 // CHECK17: omp.inner.for.body: 17891 // CHECK17-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !26 17892 // CHECK17-NEXT: [[MUL:%.*]] = mul i64 [[TMP11]], 400 17893 // CHECK17-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] 17894 // CHECK17-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !26 17895 // CHECK17-NEXT: [[TMP12:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !26 17896 // CHECK17-NEXT: [[CONV4:%.*]] = sitofp i32 [[TMP12]] to double 17897 // CHECK17-NEXT: [[ADD:%.*]] = fadd double [[CONV4]], 1.500000e+00 17898 // CHECK17-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0 17899 // CHECK17-NEXT: store double [[ADD]], double* [[A]], align 8, !llvm.access.group !26 17900 // CHECK17-NEXT: [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0 17901 // CHECK17-NEXT: [[TMP13:%.*]] = load double, double* [[A5]], align 8, !llvm.access.group !26 17902 // CHECK17-NEXT: [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00 17903 // CHECK17-NEXT: store double [[INC]], double* [[A5]], align 8, !llvm.access.group !26 17904 // CHECK17-NEXT: [[CONV6:%.*]] = fptosi double [[INC]] to i16 17905 // CHECK17-NEXT: [[TMP14:%.*]] = mul nsw i64 1, [[TMP2]] 17906 // CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP14]] 17907 // CHECK17-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1 17908 // CHECK17-NEXT: store i16 [[CONV6]], i16* [[ARRAYIDX7]], align 2, !llvm.access.group !26 17909 // CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 17910 // CHECK17: omp.body.continue: 17911 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 17912 // CHECK17: omp.inner.for.inc: 17913 // CHECK17-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !26 17914 // CHECK17-NEXT: [[ADD8:%.*]] = add i64 [[TMP15]], 1 17915 // CHECK17-NEXT: store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !26 17916 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]] 17917 // CHECK17: omp.inner.for.end: 17918 // CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 17919 // CHECK17: omp.loop.exit: 17920 // CHECK17-NEXT: call void 
@__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]]) 17921 // CHECK17-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 17922 // CHECK17-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 17923 // CHECK17-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 17924 // CHECK17: .omp.final.then: 17925 // CHECK17-NEXT: store i64 400, i64* [[IT]], align 8 17926 // CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]] 17927 // CHECK17: .omp.final.done: 17928 // CHECK17-NEXT: ret void 17929 // 17930 // 17931 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178 17932 // CHECK17-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] { 17933 // CHECK17-NEXT: entry: 17934 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 17935 // CHECK17-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 17936 // CHECK17-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8 17937 // CHECK17-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 17938 // CHECK17-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 17939 // CHECK17-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 17940 // CHECK17-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 17941 // CHECK17-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8 17942 // CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 17943 // CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 17944 // CHECK17-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8 17945 // CHECK17-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8 17946 // CHECK17-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32* 17947 // CHECK17-NEXT: store i32 [[TMP1]], i32* [[CONV2]], align 4 17948 // CHECK17-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8 17949 // CHECK17-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 8 17950 // CHECK17-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* 17951 // CHECK17-NEXT: store i16 [[TMP3]], i16* [[CONV3]], align 2 17952 // CHECK17-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8 17953 // CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) 17954 // CHECK17-NEXT: ret void 17955 // 17956 // 17957 // CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..6 17958 // CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] { 17959 // CHECK17-NEXT: entry: 17960 // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 17961 // CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 17962 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 17963 // CHECK17-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 17964 // CHECK17-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8 17965 // CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 17966 // CHECK17-NEXT: [[TMP:%.*]] = alloca i64, align 8 17967 // CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 17968 // CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 17969 // CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 17970 // CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 17971 // CHECK17-NEXT: [[I:%.*]] = alloca i64, align 8 17972 // CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 17973 // CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 17974 // CHECK17-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 17975 // CHECK17-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 17976 // CHECK17-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8 17977 // CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 17978 // CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 17979 // CHECK17-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8 17980 // CHECK17-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 17981 // CHECK17-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8 17982 // CHECK17-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 17983 // CHECK17-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 17984 // CHECK17-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 17985 // CHECK17-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 17986 // CHECK17-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) 17987 // CHECK17-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 17988 // CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6 17989 // CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 17990 // CHECK17: cond.true: 17991 // CHECK17-NEXT: br label [[COND_END:%.*]] 17992 // CHECK17: cond.false: 17993 // CHECK17-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 17994 // CHECK17-NEXT: br label [[COND_END]] 17995 // CHECK17: cond.end: 17996 // CHECK17-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 17997 // CHECK17-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 17998 // CHECK17-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 17999 // CHECK17-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8 18000 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 18001 // CHECK17: omp.inner.for.cond: 18002 // CHECK17-NEXT: [[TMP6:%.*]] = load i64, i64* 
[[DOTOMP_IV]], align 8, !llvm.access.group !29 18003 // CHECK17-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !29 18004 // CHECK17-NEXT: [[CMP2:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]] 18005 // CHECK17-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 18006 // CHECK17: omp.inner.for.body: 18007 // CHECK17-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29 18008 // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3 18009 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]] 18010 // CHECK17-NEXT: store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !29 18011 // CHECK17-NEXT: [[TMP9:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !29 18012 // CHECK17-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1 18013 // CHECK17-NEXT: store i32 [[ADD3]], i32* [[CONV]], align 8, !llvm.access.group !29 18014 // CHECK17-NEXT: [[TMP10:%.*]] = load i16, i16* [[CONV1]], align 8, !llvm.access.group !29 18015 // CHECK17-NEXT: [[CONV4:%.*]] = sext i16 [[TMP10]] to i32 18016 // CHECK17-NEXT: [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1 18017 // CHECK17-NEXT: [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16 18018 // CHECK17-NEXT: store i16 [[CONV6]], i16* [[CONV1]], align 8, !llvm.access.group !29 18019 // CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2 18020 // CHECK17-NEXT: [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !29 18021 // CHECK17-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP11]], 1 18022 // CHECK17-NEXT: store i32 [[ADD7]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !29 18023 // CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 18024 // CHECK17: omp.body.continue: 18025 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 18026 // CHECK17: omp.inner.for.inc: 18027 // CHECK17-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29 18028 // CHECK17-NEXT: [[ADD8:%.*]] = add nsw i64 [[TMP12]], 1 18029 // CHECK17-NEXT: store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29 18030 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]] 18031 // CHECK17: omp.inner.for.end: 18032 // CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 18033 // CHECK17: omp.loop.exit: 18034 // CHECK17-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 18035 // CHECK17-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 18036 // CHECK17-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 18037 // CHECK17-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 18038 // CHECK17: .omp.final.then: 18039 // CHECK17-NEXT: store i64 11, i64* [[I]], align 8 18040 // CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]] 18041 // CHECK17: .omp.final.done: 18042 // CHECK17-NEXT: ret void 18043 // 18044 // 18045 // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96 18046 // CHECK18-SAME: () #[[ATTR0:[0-9]+]] { 18047 // CHECK18-NEXT: entry: 18048 // CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*)) 18049 // CHECK18-NEXT: ret void 18050 // 18051 // 18052 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined. 
18053 // CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] { 18054 // CHECK18-NEXT: entry: 18055 // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 18056 // CHECK18-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 18057 // CHECK18-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 18058 // CHECK18-NEXT: [[TMP:%.*]] = alloca i32, align 4 18059 // CHECK18-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 18060 // CHECK18-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 18061 // CHECK18-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 18062 // CHECK18-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 18063 // CHECK18-NEXT: [[I:%.*]] = alloca i32, align 4 18064 // CHECK18-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 18065 // CHECK18-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 18066 // CHECK18-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 18067 // CHECK18-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4 18068 // CHECK18-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 18069 // CHECK18-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 18070 // CHECK18-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 18071 // CHECK18-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 18072 // CHECK18-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 18073 // CHECK18-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 18074 // CHECK18-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5 18075 // CHECK18-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 18076 // CHECK18: cond.true: 18077 // CHECK18-NEXT: br label [[COND_END:%.*]] 18078 // CHECK18: cond.false: 18079 // CHECK18-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 18080 // CHECK18-NEXT: br label [[COND_END]] 18081 // CHECK18: cond.end: 18082 // CHECK18-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 18083 // CHECK18-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 18084 // CHECK18-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 18085 // CHECK18-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 18086 // CHECK18-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 18087 // CHECK18: omp.inner.for.cond: 18088 // CHECK18-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11 18089 // CHECK18-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !11 18090 // CHECK18-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 18091 // CHECK18-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 18092 // CHECK18: omp.inner.for.body: 18093 // CHECK18-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11 18094 // CHECK18-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5 18095 // CHECK18-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]] 18096 // CHECK18-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !11 18097 // CHECK18-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 18098 // CHECK18: omp.body.continue: 18099 // CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 18100 // CHECK18: omp.inner.for.inc: 18101 // CHECK18-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11 18102 // CHECK18-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 18103 // CHECK18-NEXT: store 
i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11 18104 // CHECK18-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]] 18105 // CHECK18: omp.inner.for.end: 18106 // CHECK18-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 18107 // CHECK18: omp.loop.exit: 18108 // CHECK18-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 18109 // CHECK18-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 18110 // CHECK18-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0 18111 // CHECK18-NEXT: br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 18112 // CHECK18: .omp.final.then: 18113 // CHECK18-NEXT: store i32 33, i32* [[I]], align 4 18114 // CHECK18-NEXT: br label [[DOTOMP_FINAL_DONE]] 18115 // CHECK18: .omp.final.done: 18116 // CHECK18-NEXT: ret void 18117 // 18118 // 18119 // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108 18120 // CHECK18-SAME: (i64 [[AA:%.*]], i64 [[LIN:%.*]], i64 [[A:%.*]]) #[[ATTR0]] { 18121 // CHECK18-NEXT: entry: 18122 // CHECK18-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 18123 // CHECK18-NEXT: [[LIN_ADDR:%.*]] = alloca i64, align 8 18124 // CHECK18-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 18125 // CHECK18-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 18126 // CHECK18-NEXT: [[LIN_CASTED:%.*]] = alloca i64, align 8 18127 // CHECK18-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 18128 // CHECK18-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 18129 // CHECK18-NEXT: store i64 [[LIN]], i64* [[LIN_ADDR]], align 8 18130 // CHECK18-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 18131 // CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 18132 // CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32* 18133 // CHECK18-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32* 18134 // CHECK18-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 8 18135 // CHECK18-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* 18136 // CHECK18-NEXT: store i16 [[TMP0]], i16* [[CONV3]], align 2 18137 // CHECK18-NEXT: [[TMP1:%.*]] = load i64, i64* [[AA_CASTED]], align 8 18138 // CHECK18-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV1]], align 8 18139 // CHECK18-NEXT: [[CONV4:%.*]] = bitcast i64* [[LIN_CASTED]] to i32* 18140 // CHECK18-NEXT: store i32 [[TMP2]], i32* [[CONV4]], align 4 18141 // CHECK18-NEXT: [[TMP3:%.*]] = load i64, i64* [[LIN_CASTED]], align 8 18142 // CHECK18-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV2]], align 8 18143 // CHECK18-NEXT: [[CONV5:%.*]] = bitcast i64* [[A_CASTED]] to i32* 18144 // CHECK18-NEXT: store i32 [[TMP4]], i32* [[CONV5]], align 4 18145 // CHECK18-NEXT: [[TMP5:%.*]] = load i64, i64* [[A_CASTED]], align 8 18146 // CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]]) 18147 // CHECK18-NEXT: ret void 18148 // 18149 // 18150 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..1 18151 // CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[AA:%.*]], i64 [[LIN:%.*]], i64 [[A:%.*]]) #[[ATTR1]] { 18152 // CHECK18-NEXT: entry: 18153 // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 18154 // CHECK18-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 18155 // CHECK18-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 18156 // CHECK18-NEXT: [[LIN_ADDR:%.*]] = alloca i64, align 8 18157 // CHECK18-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 18158 // CHECK18-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 18159 // CHECK18-NEXT: [[TMP:%.*]] = alloca i64, align 8 18160 // CHECK18-NEXT: [[DOTLINEAR_START:%.*]] = alloca i32, align 4 18161 // CHECK18-NEXT: [[DOTLINEAR_START3:%.*]] = alloca i32, align 4 18162 // CHECK18-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8 18163 // CHECK18-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 18164 // CHECK18-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 18165 // CHECK18-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 18166 // CHECK18-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 18167 // CHECK18-NEXT: [[IT:%.*]] = alloca i64, align 8 18168 // CHECK18-NEXT: [[LIN4:%.*]] = alloca i32, align 4 18169 // CHECK18-NEXT: [[A5:%.*]] = alloca i32, align 4 18170 // CHECK18-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 18171 // CHECK18-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 18172 // CHECK18-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 18173 // CHECK18-NEXT: store i64 [[LIN]], i64* [[LIN_ADDR]], align 8 18174 // CHECK18-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 18175 // CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 18176 // CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32* 18177 // CHECK18-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32* 18178 // CHECK18-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV1]], align 8 18179 // CHECK18-NEXT: store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4 18180 // CHECK18-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV2]], align 8 18181 // CHECK18-NEXT: store i32 [[TMP1]], i32* [[DOTLINEAR_START3]], align 4 18182 // CHECK18-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv() #[[ATTR5:[0-9]+]] 18183 // CHECK18-NEXT: store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8 18184 // CHECK18-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 18185 // CHECK18-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8 18186 // CHECK18-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 18187 // CHECK18-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 18188 // CHECK18-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 18189 // CHECK18-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 18190 // CHECK18-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP3]]) 18191 // CHECK18-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) 18192 // CHECK18-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 18193 // CHECK18-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3 18194 // CHECK18-NEXT: br i1 
[[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 18195 // CHECK18: cond.true: 18196 // CHECK18-NEXT: br label [[COND_END:%.*]] 18197 // CHECK18: cond.false: 18198 // CHECK18-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 18199 // CHECK18-NEXT: br label [[COND_END]] 18200 // CHECK18: cond.end: 18201 // CHECK18-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 18202 // CHECK18-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 18203 // CHECK18-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 18204 // CHECK18-NEXT: store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8 18205 // CHECK18-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 18206 // CHECK18: omp.inner.for.cond: 18207 // CHECK18-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !17 18208 // CHECK18-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !17 18209 // CHECK18-NEXT: [[CMP6:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]] 18210 // CHECK18-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 18211 // CHECK18: omp.inner.for.body: 18212 // CHECK18-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !17 18213 // CHECK18-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400 18214 // CHECK18-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] 18215 // CHECK18-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !17 18216 // CHECK18-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4, !llvm.access.group !17 18217 // CHECK18-NEXT: [[CONV7:%.*]] = sext i32 [[TMP10]] to i64 18218 // CHECK18-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !17 18219 // CHECK18-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !17 18220 // CHECK18-NEXT: [[MUL8:%.*]] = mul i64 [[TMP11]], [[TMP12]] 18221 // CHECK18-NEXT: [[ADD:%.*]] = add i64 [[CONV7]], [[MUL8]] 18222 // CHECK18-NEXT: [[CONV9:%.*]] = trunc i64 [[ADD]] to i32 18223 // CHECK18-NEXT: store i32 [[CONV9]], i32* [[LIN4]], align 4, !llvm.access.group !17 18224 // CHECK18-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START3]], align 4, !llvm.access.group !17 18225 // CHECK18-NEXT: [[CONV10:%.*]] = sext i32 [[TMP13]] to i64 18226 // CHECK18-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !17 18227 // CHECK18-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !17 18228 // CHECK18-NEXT: [[MUL11:%.*]] = mul i64 [[TMP14]], [[TMP15]] 18229 // CHECK18-NEXT: [[ADD12:%.*]] = add i64 [[CONV10]], [[MUL11]] 18230 // CHECK18-NEXT: [[CONV13:%.*]] = trunc i64 [[ADD12]] to i32 18231 // CHECK18-NEXT: store i32 [[CONV13]], i32* [[A5]], align 4, !llvm.access.group !17 18232 // CHECK18-NEXT: [[TMP16:%.*]] = load i16, i16* [[CONV]], align 8, !llvm.access.group !17 18233 // CHECK18-NEXT: [[CONV14:%.*]] = sext i16 [[TMP16]] to i32 18234 // CHECK18-NEXT: [[ADD15:%.*]] = add nsw i32 [[CONV14]], 1 18235 // CHECK18-NEXT: [[CONV16:%.*]] = trunc i32 [[ADD15]] to i16 18236 // CHECK18-NEXT: store i16 [[CONV16]], i16* [[CONV]], align 8, !llvm.access.group !17 18237 // CHECK18-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 18238 // CHECK18: omp.body.continue: 18239 // CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 18240 // CHECK18: omp.inner.for.inc: 18241 // CHECK18-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !17 18242 // CHECK18-NEXT: [[ADD17:%.*]] = add i64 [[TMP17]], 1 18243 // CHECK18-NEXT: store 
i64 [[ADD17]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !17 18244 // CHECK18-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP18:![0-9]+]] 18245 // CHECK18: omp.inner.for.end: 18246 // CHECK18-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 18247 // CHECK18: omp.loop.exit: 18248 // CHECK18-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 18249 // CHECK18-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 18250 // CHECK18-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0 18251 // CHECK18-NEXT: br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 18252 // CHECK18: .omp.final.then: 18253 // CHECK18-NEXT: store i64 400, i64* [[IT]], align 8 18254 // CHECK18-NEXT: br label [[DOTOMP_FINAL_DONE]] 18255 // CHECK18: .omp.final.done: 18256 // CHECK18-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 18257 // CHECK18-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0 18258 // CHECK18-NEXT: br i1 [[TMP21]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]] 18259 // CHECK18: .omp.linear.pu: 18260 // CHECK18-NEXT: [[TMP22:%.*]] = load i32, i32* [[LIN4]], align 4 18261 // CHECK18-NEXT: store i32 [[TMP22]], i32* [[CONV1]], align 8 18262 // CHECK18-NEXT: [[TMP23:%.*]] = load i32, i32* [[A5]], align 4 18263 // CHECK18-NEXT: store i32 [[TMP23]], i32* [[CONV2]], align 8 18264 // CHECK18-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] 18265 // CHECK18: .omp.linear.pu.done: 18266 // CHECK18-NEXT: ret void 18267 // 18268 // 18269 // CHECK18-LABEL: define {{[^@]+}}@_Z7get_valv 18270 // CHECK18-SAME: () #[[ATTR3:[0-9]+]] { 18271 // CHECK18-NEXT: entry: 18272 // CHECK18-NEXT: ret i64 0 18273 // 18274 // 18275 // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116 18276 // CHECK18-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]]) #[[ATTR0]] { 18277 // CHECK18-NEXT: entry: 18278 // CHECK18-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 18279 // CHECK18-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 18280 // CHECK18-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 18281 // CHECK18-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 18282 // CHECK18-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 18283 // CHECK18-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 18284 // CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 18285 // CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 18286 // CHECK18-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 8 18287 // CHECK18-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32* 18288 // CHECK18-NEXT: store i32 [[TMP0]], i32* [[CONV2]], align 4 18289 // CHECK18-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8 18290 // CHECK18-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 8 18291 // CHECK18-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* 18292 // CHECK18-NEXT: store i16 [[TMP2]], i16* [[CONV3]], align 2 18293 // CHECK18-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8 18294 // CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]]) 18295 // CHECK18-NEXT: ret void 18296 // 18297 // 18298 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..2 18299 // CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]]) #[[ATTR1]] { 18300 // CHECK18-NEXT: entry: 18301 // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 18302 // CHECK18-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 18303 // CHECK18-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 18304 // CHECK18-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 18305 // CHECK18-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 18306 // CHECK18-NEXT: [[TMP:%.*]] = alloca i16, align 2 18307 // CHECK18-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 18308 // CHECK18-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 18309 // CHECK18-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 18310 // CHECK18-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 18311 // CHECK18-NEXT: [[IT:%.*]] = alloca i16, align 2 18312 // CHECK18-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 18313 // CHECK18-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 18314 // CHECK18-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 18315 // CHECK18-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 18316 // CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 18317 // CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 18318 // CHECK18-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 18319 // CHECK18-NEXT: store i32 3, i32* [[DOTOMP_UB]], align 4 18320 // CHECK18-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 18321 // CHECK18-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 18322 // CHECK18-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 18323 // CHECK18-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 18324 // CHECK18-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 18325 // CHECK18-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 18326 // CHECK18-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3 18327 // CHECK18-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 18328 // CHECK18: cond.true: 18329 // CHECK18-NEXT: br label [[COND_END:%.*]] 18330 // CHECK18: cond.false: 18331 // CHECK18-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 18332 // CHECK18-NEXT: br label [[COND_END]] 18333 // CHECK18: cond.end: 18334 // CHECK18-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 18335 // CHECK18-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 18336 // CHECK18-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 18337 // CHECK18-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 18338 // CHECK18-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 18339 // CHECK18: omp.inner.for.cond: 18340 // CHECK18-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20 18341 // CHECK18-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !20 18342 // CHECK18-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 18343 // CHECK18-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 18344 // 
CHECK18: omp.inner.for.body: 18345 // CHECK18-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20 18346 // CHECK18-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4 18347 // CHECK18-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]] 18348 // CHECK18-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD]] to i16 18349 // CHECK18-NEXT: store i16 [[CONV3]], i16* [[IT]], align 2, !llvm.access.group !20 18350 // CHECK18-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !20 18351 // CHECK18-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], 1 18352 // CHECK18-NEXT: store i32 [[ADD4]], i32* [[CONV]], align 8, !llvm.access.group !20 18353 // CHECK18-NEXT: [[TMP9:%.*]] = load i16, i16* [[CONV1]], align 8, !llvm.access.group !20 18354 // CHECK18-NEXT: [[CONV5:%.*]] = sext i16 [[TMP9]] to i32 18355 // CHECK18-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1 18356 // CHECK18-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16 18357 // CHECK18-NEXT: store i16 [[CONV7]], i16* [[CONV1]], align 8, !llvm.access.group !20 18358 // CHECK18-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 18359 // CHECK18: omp.body.continue: 18360 // CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 18361 // CHECK18: omp.inner.for.inc: 18362 // CHECK18-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20 18363 // CHECK18-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP10]], 1 18364 // CHECK18-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20 18365 // CHECK18-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]] 18366 // CHECK18: omp.inner.for.end: 18367 // CHECK18-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 18368 // CHECK18: omp.loop.exit: 18369 // CHECK18-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 18370 // CHECK18-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 18371 // CHECK18-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 18372 // CHECK18-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 18373 // CHECK18: .omp.final.then: 18374 // CHECK18-NEXT: store i16 22, i16* [[IT]], align 2 18375 // CHECK18-NEXT: br label [[DOTOMP_FINAL_DONE]] 18376 // CHECK18: .omp.final.done: 18377 // CHECK18-NEXT: ret void 18378 // 18379 // 18380 // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140 18381 // CHECK18-SAME: (i64 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] { 18382 // CHECK18-NEXT: entry: 18383 // CHECK18-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 18384 // CHECK18-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8 18385 // CHECK18-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 18386 // CHECK18-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8 18387 // CHECK18-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8 18388 // CHECK18-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8 18389 // CHECK18-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8 18390 // CHECK18-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8 18391 // CHECK18-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8 18392 // CHECK18-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 18393 // CHECK18-NEXT: [[A_CASTED:%.*]] = alloca i64, 
align 8 18394 // CHECK18-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 18395 // CHECK18-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 18396 // CHECK18-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8 18397 // CHECK18-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 18398 // CHECK18-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8 18399 // CHECK18-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8 18400 // CHECK18-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8 18401 // CHECK18-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8 18402 // CHECK18-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8 18403 // CHECK18-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8 18404 // CHECK18-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 18405 // CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 18406 // CHECK18-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8 18407 // CHECK18-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 18408 // CHECK18-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8 18409 // CHECK18-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8 18410 // CHECK18-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8 18411 // CHECK18-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8 18412 // CHECK18-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8 18413 // CHECK18-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8 18414 // CHECK18-NEXT: [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32* 18415 // CHECK18-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 8 18416 // CHECK18-NEXT: [[CONV6:%.*]] = bitcast i64* [[A_CASTED]] to i32* 18417 // CHECK18-NEXT: store i32 [[TMP8]], i32* [[CONV6]], align 4 18418 // CHECK18-NEXT: [[TMP9:%.*]] = load i64, i64* [[A_CASTED]], align 8 18419 // CHECK18-NEXT: [[TMP10:%.*]] = load i32, i32* [[CONV5]], align 8 18420 // CHECK18-NEXT: [[CONV7:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* 18421 // CHECK18-NEXT: store i32 [[TMP10]], i32* [[CONV7]], align 4 18422 // CHECK18-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 18423 // CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, [10 x float]*, i64, float*, [5 x [10 x double]]*, i64, i64, double*, %struct.TT*, i64)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP9]], [10 x float]* [[TMP0]], i64 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i64 [[TMP11]]) 18424 // CHECK18-NEXT: ret void 18425 // 18426 // 18427 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..3 18428 // CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] { 18429 // CHECK18-NEXT: entry: 18430 // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 18431 // CHECK18-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 18432 // CHECK18-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 18433 // CHECK18-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8 18434 // CHECK18-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 18435 // CHECK18-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8 18436 // CHECK18-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8 18437 // CHECK18-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8 18438 // CHECK18-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8 18439 // CHECK18-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8 18440 // CHECK18-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8 18441 // CHECK18-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 18442 // CHECK18-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 18443 // CHECK18-NEXT: [[TMP:%.*]] = alloca i8, align 1 18444 // CHECK18-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 18445 // CHECK18-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 18446 // CHECK18-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 18447 // CHECK18-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 18448 // CHECK18-NEXT: [[IT:%.*]] = alloca i8, align 1 18449 // CHECK18-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 18450 // CHECK18-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 18451 // CHECK18-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 18452 // CHECK18-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8 18453 // CHECK18-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 18454 // CHECK18-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8 18455 // CHECK18-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8 18456 // CHECK18-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8 18457 // CHECK18-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8 18458 // CHECK18-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8 18459 // CHECK18-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8 18460 // CHECK18-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 18461 // CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 18462 // CHECK18-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8 18463 // CHECK18-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 18464 // CHECK18-NEXT: 
[[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8 18465 // CHECK18-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8 18466 // CHECK18-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8 18467 // CHECK18-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8 18468 // CHECK18-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8 18469 // CHECK18-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8 18470 // CHECK18-NEXT: [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32* 18471 // CHECK18-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 18472 // CHECK18-NEXT: store i32 25, i32* [[DOTOMP_UB]], align 4 18473 // CHECK18-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 18474 // CHECK18-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 18475 // CHECK18-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV5]], align 8 18476 // CHECK18-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 18477 // CHECK18-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4 18478 // CHECK18-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]]) 18479 // CHECK18-NEXT: br label [[OMP_DISPATCH_COND:%.*]] 18480 // CHECK18: omp.dispatch.cond: 18481 // CHECK18-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 18482 // CHECK18-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25 18483 // CHECK18-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 18484 // CHECK18: cond.true: 18485 // CHECK18-NEXT: br label [[COND_END:%.*]] 18486 // CHECK18: cond.false: 18487 // CHECK18-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 18488 // CHECK18-NEXT: br label [[COND_END]] 18489 // CHECK18: cond.end: 18490 // CHECK18-NEXT: [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] 18491 // CHECK18-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 18492 // CHECK18-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 18493 // CHECK18-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4 18494 // CHECK18-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 18495 // CHECK18-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 18496 // CHECK18-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] 18497 // CHECK18-NEXT: br i1 [[CMP6]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] 18498 // CHECK18: omp.dispatch.body: 18499 // CHECK18-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 18500 // CHECK18: omp.inner.for.cond: 18501 // CHECK18-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23 18502 // CHECK18-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !23 18503 // CHECK18-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]] 18504 // CHECK18-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 18505 // CHECK18: omp.inner.for.body: 18506 // CHECK18-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23 18507 // CHECK18-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1 18508 // CHECK18-NEXT: [[SUB:%.*]] = sub nsw i32 122, [[MUL]] 18509 // CHECK18-NEXT: [[CONV8:%.*]] = trunc i32 [[SUB]] to i8 18510 // CHECK18-NEXT: store i8 [[CONV8]], i8* [[IT]], align 1, !llvm.access.group !23 18511 // CHECK18-NEXT: [[TMP19:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !23 18512 // 
CHECK18-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1 18513 // CHECK18-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8, !llvm.access.group !23 18514 // CHECK18-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 2 18515 // CHECK18-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !23 18516 // CHECK18-NEXT: [[CONV9:%.*]] = fpext float [[TMP20]] to double 18517 // CHECK18-NEXT: [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00 18518 // CHECK18-NEXT: [[CONV11:%.*]] = fptrunc double [[ADD10]] to float 18519 // CHECK18-NEXT: store float [[CONV11]], float* [[ARRAYIDX]], align 4, !llvm.access.group !23 18520 // CHECK18-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds float, float* [[TMP2]], i64 3 18521 // CHECK18-NEXT: [[TMP21:%.*]] = load float, float* [[ARRAYIDX12]], align 4, !llvm.access.group !23 18522 // CHECK18-NEXT: [[CONV13:%.*]] = fpext float [[TMP21]] to double 18523 // CHECK18-NEXT: [[ADD14:%.*]] = fadd double [[CONV13]], 1.000000e+00 18524 // CHECK18-NEXT: [[CONV15:%.*]] = fptrunc double [[ADD14]] to float 18525 // CHECK18-NEXT: store float [[CONV15]], float* [[ARRAYIDX12]], align 4, !llvm.access.group !23 18526 // CHECK18-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i64 0, i64 1 18527 // CHECK18-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX16]], i64 0, i64 2 18528 // CHECK18-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX17]], align 8, !llvm.access.group !23 18529 // CHECK18-NEXT: [[ADD18:%.*]] = fadd double [[TMP22]], 1.000000e+00 18530 // CHECK18-NEXT: store double [[ADD18]], double* [[ARRAYIDX17]], align 8, !llvm.access.group !23 18531 // CHECK18-NEXT: [[TMP23:%.*]] = mul nsw i64 1, [[TMP5]] 18532 // CHECK18-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds double, double* [[TMP6]], i64 [[TMP23]] 18533 // CHECK18-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX19]], i64 3 18534 // CHECK18-NEXT: [[TMP24:%.*]] = load double, double* [[ARRAYIDX20]], align 8, !llvm.access.group !23 18535 // CHECK18-NEXT: [[ADD21:%.*]] = fadd double [[TMP24]], 1.000000e+00 18536 // CHECK18-NEXT: store double [[ADD21]], double* [[ARRAYIDX20]], align 8, !llvm.access.group !23 18537 // CHECK18-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0 18538 // CHECK18-NEXT: [[TMP25:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group !23 18539 // CHECK18-NEXT: [[ADD22:%.*]] = add nsw i64 [[TMP25]], 1 18540 // CHECK18-NEXT: store i64 [[ADD22]], i64* [[X]], align 8, !llvm.access.group !23 18541 // CHECK18-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1 18542 // CHECK18-NEXT: [[TMP26:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group !23 18543 // CHECK18-NEXT: [[CONV23:%.*]] = sext i8 [[TMP26]] to i32 18544 // CHECK18-NEXT: [[ADD24:%.*]] = add nsw i32 [[CONV23]], 1 18545 // CHECK18-NEXT: [[CONV25:%.*]] = trunc i32 [[ADD24]] to i8 18546 // CHECK18-NEXT: store i8 [[CONV25]], i8* [[Y]], align 8, !llvm.access.group !23 18547 // CHECK18-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 18548 // CHECK18: omp.body.continue: 18549 // CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 18550 // CHECK18: omp.inner.for.inc: 18551 // CHECK18-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23 18552 // CHECK18-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP27]], 1 18553 // CHECK18-NEXT: store i32 [[ADD26]], 
i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23 18554 // CHECK18-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]] 18555 // CHECK18: omp.inner.for.end: 18556 // CHECK18-NEXT: br label [[OMP_DISPATCH_INC:%.*]] 18557 // CHECK18: omp.dispatch.inc: 18558 // CHECK18-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 18559 // CHECK18-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 18560 // CHECK18-NEXT: [[ADD27:%.*]] = add nsw i32 [[TMP28]], [[TMP29]] 18561 // CHECK18-NEXT: store i32 [[ADD27]], i32* [[DOTOMP_LB]], align 4 18562 // CHECK18-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 18563 // CHECK18-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 18564 // CHECK18-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP30]], [[TMP31]] 18565 // CHECK18-NEXT: store i32 [[ADD28]], i32* [[DOTOMP_UB]], align 4 18566 // CHECK18-NEXT: br label [[OMP_DISPATCH_COND]] 18567 // CHECK18: omp.dispatch.end: 18568 // CHECK18-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]]) 18569 // CHECK18-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 18570 // CHECK18-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0 18571 // CHECK18-NEXT: br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 18572 // CHECK18: .omp.final.then: 18573 // CHECK18-NEXT: store i8 96, i8* [[IT]], align 1 18574 // CHECK18-NEXT: br label [[DOTOMP_FINAL_DONE]] 18575 // CHECK18: .omp.final.done: 18576 // CHECK18-NEXT: ret void 18577 // 18578 // 18579 // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195 18580 // CHECK18-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] { 18581 // CHECK18-NEXT: entry: 18582 // CHECK18-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 18583 // CHECK18-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 18584 // CHECK18-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8 18585 // CHECK18-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8 18586 // CHECK18-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 18587 // CHECK18-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 18588 // CHECK18-NEXT: [[AAA_CASTED:%.*]] = alloca i64, align 8 18589 // CHECK18-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 18590 // CHECK18-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 18591 // CHECK18-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8 18592 // CHECK18-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8 18593 // CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 18594 // CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 18595 // CHECK18-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8* 18596 // CHECK18-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8 18597 // CHECK18-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8 18598 // CHECK18-NEXT: [[CONV3:%.*]] = bitcast i64* [[A_CASTED]] to i32* 18599 // CHECK18-NEXT: store i32 [[TMP1]], i32* [[CONV3]], align 4 18600 // CHECK18-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8 18601 // CHECK18-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 8 18602 // CHECK18-NEXT: [[CONV4:%.*]] = bitcast i64* [[AA_CASTED]] to i16* 18603 // CHECK18-NEXT: store i16 [[TMP3]], i16* [[CONV4]], align 2 18604 // CHECK18-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8 18605 // CHECK18-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV2]], align 8 18606 // CHECK18-NEXT: [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] 
to i8* 18607 // CHECK18-NEXT: store i8 [[TMP5]], i8* [[CONV5]], align 1 18608 // CHECK18-NEXT: [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8 18609 // CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]]) 18610 // CHECK18-NEXT: ret void 18611 // 18612 // 18613 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..4 18614 // CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] { 18615 // CHECK18-NEXT: entry: 18616 // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 18617 // CHECK18-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 18618 // CHECK18-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 18619 // CHECK18-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 18620 // CHECK18-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8 18621 // CHECK18-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8 18622 // CHECK18-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 18623 // CHECK18-NEXT: [[TMP:%.*]] = alloca i32, align 4 18624 // CHECK18-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 18625 // CHECK18-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 18626 // CHECK18-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 18627 // CHECK18-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 18628 // CHECK18-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8 18629 // CHECK18-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8 18630 // CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 18631 // CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 18632 // CHECK18-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8* 18633 // CHECK18-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8 18634 // CHECK18-NEXT: ret void 18635 // 18636 // 18637 // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216 18638 // CHECK18-SAME: (%struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR0]] { 18639 // CHECK18-NEXT: entry: 18640 // CHECK18-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 18641 // CHECK18-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8 18642 // CHECK18-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 18643 // CHECK18-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8 18644 // CHECK18-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 8 18645 // CHECK18-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8 18646 // CHECK18-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 18647 // CHECK18-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8 18648 // CHECK18-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 18649 // CHECK18-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8 18650 // CHECK18-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8 18651 // CHECK18-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 18652 // CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32* 18653 // CHECK18-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 18654 // CHECK18-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8 18655 // CHECK18-NEXT: [[TMP3:%.*]] = 
load i16*, i16** [[C_ADDR]], align 8 18656 // CHECK18-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV]], align 8 18657 // CHECK18-NEXT: [[CONV3:%.*]] = bitcast i64* [[B_CASTED]] to i32* 18658 // CHECK18-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 18659 // CHECK18-NEXT: [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8 18660 // CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]]) 18661 // CHECK18-NEXT: ret void 18662 // 18663 // 18664 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..5 18665 // CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR1]] { 18666 // CHECK18-NEXT: entry: 18667 // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 18668 // CHECK18-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 18669 // CHECK18-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 18670 // CHECK18-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8 18671 // CHECK18-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 18672 // CHECK18-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8 18673 // CHECK18-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 8 18674 // CHECK18-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 18675 // CHECK18-NEXT: [[TMP:%.*]] = alloca i64, align 8 18676 // CHECK18-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 18677 // CHECK18-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 18678 // CHECK18-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 18679 // CHECK18-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 18680 // CHECK18-NEXT: [[IT:%.*]] = alloca i64, align 8 18681 // CHECK18-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 18682 // CHECK18-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 18683 // CHECK18-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 18684 // CHECK18-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8 18685 // CHECK18-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 18686 // CHECK18-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8 18687 // CHECK18-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8 18688 // CHECK18-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 18689 // CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32* 18690 // CHECK18-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 18691 // CHECK18-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8 18692 // CHECK18-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8 18693 // CHECK18-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 18694 // CHECK18-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8 18695 // CHECK18-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 18696 // CHECK18-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 18697 // CHECK18-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 18698 // CHECK18-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4 18699 // CHECK18-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) 18700 // CHECK18-NEXT: 
[[TMP6:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 18701 // CHECK18-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP6]], 3 18702 // CHECK18-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 18703 // CHECK18: cond.true: 18704 // CHECK18-NEXT: br label [[COND_END:%.*]] 18705 // CHECK18: cond.false: 18706 // CHECK18-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 18707 // CHECK18-NEXT: br label [[COND_END]] 18708 // CHECK18: cond.end: 18709 // CHECK18-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] 18710 // CHECK18-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 18711 // CHECK18-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 18712 // CHECK18-NEXT: store i64 [[TMP8]], i64* [[DOTOMP_IV]], align 8 18713 // CHECK18-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 18714 // CHECK18: omp.inner.for.cond: 18715 // CHECK18-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !26 18716 // CHECK18-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !26 18717 // CHECK18-NEXT: [[CMP3:%.*]] = icmp ule i64 [[TMP9]], [[TMP10]] 18718 // CHECK18-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 18719 // CHECK18: omp.inner.for.body: 18720 // CHECK18-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !26 18721 // CHECK18-NEXT: [[MUL:%.*]] = mul i64 [[TMP11]], 400 18722 // CHECK18-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] 18723 // CHECK18-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !26 18724 // CHECK18-NEXT: [[TMP12:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !26 18725 // CHECK18-NEXT: [[CONV4:%.*]] = sitofp i32 [[TMP12]] to double 18726 // CHECK18-NEXT: [[ADD:%.*]] = fadd double [[CONV4]], 1.500000e+00 18727 // CHECK18-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0 18728 // CHECK18-NEXT: store double [[ADD]], double* [[A]], align 8, !llvm.access.group !26 18729 // CHECK18-NEXT: [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0 18730 // CHECK18-NEXT: [[TMP13:%.*]] = load double, double* [[A5]], align 8, !llvm.access.group !26 18731 // CHECK18-NEXT: [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00 18732 // CHECK18-NEXT: store double [[INC]], double* [[A5]], align 8, !llvm.access.group !26 18733 // CHECK18-NEXT: [[CONV6:%.*]] = fptosi double [[INC]] to i16 18734 // CHECK18-NEXT: [[TMP14:%.*]] = mul nsw i64 1, [[TMP2]] 18735 // CHECK18-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP14]] 18736 // CHECK18-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1 18737 // CHECK18-NEXT: store i16 [[CONV6]], i16* [[ARRAYIDX7]], align 2, !llvm.access.group !26 18738 // CHECK18-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 18739 // CHECK18: omp.body.continue: 18740 // CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 18741 // CHECK18: omp.inner.for.inc: 18742 // CHECK18-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !26 18743 // CHECK18-NEXT: [[ADD8:%.*]] = add i64 [[TMP15]], 1 18744 // CHECK18-NEXT: store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !26 18745 // CHECK18-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]] 18746 // CHECK18: omp.inner.for.end: 18747 // CHECK18-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 18748 // CHECK18: omp.loop.exit: 18749 // CHECK18-NEXT: call void 
@__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]]) 18750 // CHECK18-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 18751 // CHECK18-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 18752 // CHECK18-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 18753 // CHECK18: .omp.final.then: 18754 // CHECK18-NEXT: store i64 400, i64* [[IT]], align 8 18755 // CHECK18-NEXT: br label [[DOTOMP_FINAL_DONE]] 18756 // CHECK18: .omp.final.done: 18757 // CHECK18-NEXT: ret void 18758 // 18759 // 18760 // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178 18761 // CHECK18-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] { 18762 // CHECK18-NEXT: entry: 18763 // CHECK18-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 18764 // CHECK18-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 18765 // CHECK18-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8 18766 // CHECK18-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 18767 // CHECK18-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 18768 // CHECK18-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 18769 // CHECK18-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 18770 // CHECK18-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8 18771 // CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 18772 // CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 18773 // CHECK18-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8 18774 // CHECK18-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8 18775 // CHECK18-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32* 18776 // CHECK18-NEXT: store i32 [[TMP1]], i32* [[CONV2]], align 4 18777 // CHECK18-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8 18778 // CHECK18-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 8 18779 // CHECK18-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* 18780 // CHECK18-NEXT: store i16 [[TMP3]], i16* [[CONV3]], align 2 18781 // CHECK18-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8 18782 // CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) 18783 // CHECK18-NEXT: ret void 18784 // 18785 // 18786 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..6 18787 // CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] { 18788 // CHECK18-NEXT: entry: 18789 // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 18790 // CHECK18-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 18791 // CHECK18-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 18792 // CHECK18-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 18793 // CHECK18-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8 18794 // CHECK18-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 18795 // CHECK18-NEXT: [[TMP:%.*]] = alloca i64, align 8 18796 // CHECK18-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 18797 // CHECK18-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 18798 // CHECK18-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 18799 // CHECK18-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 18800 // CHECK18-NEXT: [[I:%.*]] = alloca i64, align 8 18801 // CHECK18-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 18802 // CHECK18-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 18803 // CHECK18-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 18804 // CHECK18-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 18805 // CHECK18-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8 18806 // CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 18807 // CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 18808 // CHECK18-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8 18809 // CHECK18-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 18810 // CHECK18-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8 18811 // CHECK18-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 18812 // CHECK18-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 18813 // CHECK18-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 18814 // CHECK18-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 18815 // CHECK18-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) 18816 // CHECK18-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 18817 // CHECK18-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6 18818 // CHECK18-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 18819 // CHECK18: cond.true: 18820 // CHECK18-NEXT: br label [[COND_END:%.*]] 18821 // CHECK18: cond.false: 18822 // CHECK18-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 18823 // CHECK18-NEXT: br label [[COND_END]] 18824 // CHECK18: cond.end: 18825 // CHECK18-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 18826 // CHECK18-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 18827 // CHECK18-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 18828 // CHECK18-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8 18829 // CHECK18-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 18830 // CHECK18: omp.inner.for.cond: 18831 // CHECK18-NEXT: [[TMP6:%.*]] = load i64, i64* 
[[DOTOMP_IV]], align 8, !llvm.access.group !29 18832 // CHECK18-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !29 18833 // CHECK18-NEXT: [[CMP2:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]] 18834 // CHECK18-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 18835 // CHECK18: omp.inner.for.body: 18836 // CHECK18-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29 18837 // CHECK18-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3 18838 // CHECK18-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]] 18839 // CHECK18-NEXT: store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !29 18840 // CHECK18-NEXT: [[TMP9:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !29 18841 // CHECK18-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1 18842 // CHECK18-NEXT: store i32 [[ADD3]], i32* [[CONV]], align 8, !llvm.access.group !29 18843 // CHECK18-NEXT: [[TMP10:%.*]] = load i16, i16* [[CONV1]], align 8, !llvm.access.group !29 18844 // CHECK18-NEXT: [[CONV4:%.*]] = sext i16 [[TMP10]] to i32 18845 // CHECK18-NEXT: [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1 18846 // CHECK18-NEXT: [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16 18847 // CHECK18-NEXT: store i16 [[CONV6]], i16* [[CONV1]], align 8, !llvm.access.group !29 18848 // CHECK18-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2 18849 // CHECK18-NEXT: [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !29 18850 // CHECK18-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP11]], 1 18851 // CHECK18-NEXT: store i32 [[ADD7]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !29 18852 // CHECK18-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 18853 // CHECK18: omp.body.continue: 18854 // CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 18855 // CHECK18: omp.inner.for.inc: 18856 // CHECK18-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29 18857 // CHECK18-NEXT: [[ADD8:%.*]] = add nsw i64 [[TMP12]], 1 18858 // CHECK18-NEXT: store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29 18859 // CHECK18-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]] 18860 // CHECK18: omp.inner.for.end: 18861 // CHECK18-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 18862 // CHECK18: omp.loop.exit: 18863 // CHECK18-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 18864 // CHECK18-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 18865 // CHECK18-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 18866 // CHECK18-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 18867 // CHECK18: .omp.final.then: 18868 // CHECK18-NEXT: store i64 11, i64* [[I]], align 8 18869 // CHECK18-NEXT: br label [[DOTOMP_FINAL_DONE]] 18870 // CHECK18: .omp.final.done: 18871 // CHECK18-NEXT: ret void 18872 // 18873 // 18874 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96 18875 // CHECK19-SAME: () #[[ATTR0:[0-9]+]] { 18876 // CHECK19-NEXT: entry: 18877 // CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*)) 18878 // CHECK19-NEXT: ret void 18879 // 18880 // 18881 // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined. 
18882 // CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] { 18883 // CHECK19-NEXT: entry: 18884 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 18885 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 18886 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 18887 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4 18888 // CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 18889 // CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 18890 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 18891 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 18892 // CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4 18893 // CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 18894 // CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 18895 // CHECK19-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 18896 // CHECK19-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4 18897 // CHECK19-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 18898 // CHECK19-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 18899 // CHECK19-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 18900 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 18901 // CHECK19-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 18902 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 18903 // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5 18904 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 18905 // CHECK19: cond.true: 18906 // CHECK19-NEXT: br label [[COND_END:%.*]] 18907 // CHECK19: cond.false: 18908 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 18909 // CHECK19-NEXT: br label [[COND_END]] 18910 // CHECK19: cond.end: 18911 // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 18912 // CHECK19-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 18913 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 18914 // CHECK19-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 18915 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 18916 // CHECK19: omp.inner.for.cond: 18917 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12 18918 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !12 18919 // CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 18920 // CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 18921 // CHECK19: omp.inner.for.body: 18922 // CHECK19-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12 18923 // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5 18924 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]] 18925 // CHECK19-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !12 18926 // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 18927 // CHECK19: omp.body.continue: 18928 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 18929 // CHECK19: omp.inner.for.inc: 18930 // CHECK19-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12 18931 // CHECK19-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 18932 // CHECK19-NEXT: store 
i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12 18933 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]] 18934 // CHECK19: omp.inner.for.end: 18935 // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 18936 // CHECK19: omp.loop.exit: 18937 // CHECK19-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 18938 // CHECK19-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 18939 // CHECK19-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0 18940 // CHECK19-NEXT: br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 18941 // CHECK19: .omp.final.then: 18942 // CHECK19-NEXT: store i32 33, i32* [[I]], align 4 18943 // CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]] 18944 // CHECK19: .omp.final.done: 18945 // CHECK19-NEXT: ret void 18946 // 18947 // 18948 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108 18949 // CHECK19-SAME: (i32 [[AA:%.*]], i32 [[LIN:%.*]], i32 [[A:%.*]]) #[[ATTR0]] { 18950 // CHECK19-NEXT: entry: 18951 // CHECK19-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 18952 // CHECK19-NEXT: [[LIN_ADDR:%.*]] = alloca i32, align 4 18953 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 18954 // CHECK19-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 18955 // CHECK19-NEXT: [[LIN_CASTED:%.*]] = alloca i32, align 4 18956 // CHECK19-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 18957 // CHECK19-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 18958 // CHECK19-NEXT: store i32 [[LIN]], i32* [[LIN_ADDR]], align 4 18959 // CHECK19-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 18960 // CHECK19-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* 18961 // CHECK19-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 4 18962 // CHECK19-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* 18963 // CHECK19-NEXT: store i16 [[TMP0]], i16* [[CONV1]], align 2 18964 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[AA_CASTED]], align 4 18965 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[LIN_ADDR]], align 4 18966 // CHECK19-NEXT: store i32 [[TMP2]], i32* [[LIN_CASTED]], align 4 18967 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[LIN_CASTED]], align 4 18968 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[A_ADDR]], align 4 18969 // CHECK19-NEXT: store i32 [[TMP4]], i32* [[A_CASTED]], align 4 18970 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[A_CASTED]], align 4 18971 // CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]]) 18972 // CHECK19-NEXT: ret void 18973 // 18974 // 18975 // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..1 18976 // CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[AA:%.*]], i32 [[LIN:%.*]], i32 [[A:%.*]]) #[[ATTR1]] { 18977 // CHECK19-NEXT: entry: 18978 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 18979 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 18980 // CHECK19-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 18981 // CHECK19-NEXT: [[LIN_ADDR:%.*]] = alloca i32, align 4 18982 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 18983 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 18984 // CHECK19-NEXT: [[TMP:%.*]] = alloca i64, align 4 18985 // CHECK19-NEXT: [[DOTLINEAR_START:%.*]] = alloca i32, align 4 18986 // CHECK19-NEXT: [[DOTLINEAR_START1:%.*]] = alloca i32, align 4 18987 // CHECK19-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8 18988 // CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 18989 // CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 18990 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 18991 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 18992 // CHECK19-NEXT: [[IT:%.*]] = alloca i64, align 8 18993 // CHECK19-NEXT: [[LIN2:%.*]] = alloca i32, align 4 18994 // CHECK19-NEXT: [[A3:%.*]] = alloca i32, align 4 18995 // CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 18996 // CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 18997 // CHECK19-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 18998 // CHECK19-NEXT: store i32 [[LIN]], i32* [[LIN_ADDR]], align 4 18999 // CHECK19-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 19000 // CHECK19-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* 19001 // CHECK19-NEXT: [[TMP0:%.*]] = load i32, i32* [[LIN_ADDR]], align 4 19002 // CHECK19-NEXT: store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4 19003 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4 19004 // CHECK19-NEXT: store i32 [[TMP1]], i32* [[DOTLINEAR_START1]], align 4 19005 // CHECK19-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv() #[[ATTR5:[0-9]+]] 19006 // CHECK19-NEXT: store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8 19007 // CHECK19-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 19008 // CHECK19-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8 19009 // CHECK19-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 19010 // CHECK19-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 19011 // CHECK19-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 19012 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 19013 // CHECK19-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP3]]) 19014 // CHECK19-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) 19015 // CHECK19-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 19016 // CHECK19-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3 19017 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 19018 // CHECK19: cond.true: 19019 // CHECK19-NEXT: br label [[COND_END:%.*]] 
19020 // CHECK19: cond.false: 19021 // CHECK19-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 19022 // CHECK19-NEXT: br label [[COND_END]] 19023 // CHECK19: cond.end: 19024 // CHECK19-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 19025 // CHECK19-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 19026 // CHECK19-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 19027 // CHECK19-NEXT: store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8 19028 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 19029 // CHECK19: omp.inner.for.cond: 19030 // CHECK19-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18 19031 // CHECK19-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !18 19032 // CHECK19-NEXT: [[CMP4:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]] 19033 // CHECK19-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 19034 // CHECK19: omp.inner.for.body: 19035 // CHECK19-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18 19036 // CHECK19-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400 19037 // CHECK19-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] 19038 // CHECK19-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !18 19039 // CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4, !llvm.access.group !18 19040 // CHECK19-NEXT: [[CONV5:%.*]] = sext i32 [[TMP10]] to i64 19041 // CHECK19-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18 19042 // CHECK19-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !18 19043 // CHECK19-NEXT: [[MUL6:%.*]] = mul i64 [[TMP11]], [[TMP12]] 19044 // CHECK19-NEXT: [[ADD:%.*]] = add i64 [[CONV5]], [[MUL6]] 19045 // CHECK19-NEXT: [[CONV7:%.*]] = trunc i64 [[ADD]] to i32 19046 // CHECK19-NEXT: store i32 [[CONV7]], i32* [[LIN2]], align 4, !llvm.access.group !18 19047 // CHECK19-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START1]], align 4, !llvm.access.group !18 19048 // CHECK19-NEXT: [[CONV8:%.*]] = sext i32 [[TMP13]] to i64 19049 // CHECK19-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18 19050 // CHECK19-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !18 19051 // CHECK19-NEXT: [[MUL9:%.*]] = mul i64 [[TMP14]], [[TMP15]] 19052 // CHECK19-NEXT: [[ADD10:%.*]] = add i64 [[CONV8]], [[MUL9]] 19053 // CHECK19-NEXT: [[CONV11:%.*]] = trunc i64 [[ADD10]] to i32 19054 // CHECK19-NEXT: store i32 [[CONV11]], i32* [[A3]], align 4, !llvm.access.group !18 19055 // CHECK19-NEXT: [[TMP16:%.*]] = load i16, i16* [[CONV]], align 4, !llvm.access.group !18 19056 // CHECK19-NEXT: [[CONV12:%.*]] = sext i16 [[TMP16]] to i32 19057 // CHECK19-NEXT: [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1 19058 // CHECK19-NEXT: [[CONV14:%.*]] = trunc i32 [[ADD13]] to i16 19059 // CHECK19-NEXT: store i16 [[CONV14]], i16* [[CONV]], align 4, !llvm.access.group !18 19060 // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 19061 // CHECK19: omp.body.continue: 19062 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 19063 // CHECK19: omp.inner.for.inc: 19064 // CHECK19-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18 19065 // CHECK19-NEXT: [[ADD15:%.*]] = add i64 [[TMP17]], 1 19066 // CHECK19-NEXT: store i64 [[ADD15]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18 19067 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop 
[[LOOP19:![0-9]+]] 19068 // CHECK19: omp.inner.for.end: 19069 // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 19070 // CHECK19: omp.loop.exit: 19071 // CHECK19-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 19072 // CHECK19-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 19073 // CHECK19-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0 19074 // CHECK19-NEXT: br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 19075 // CHECK19: .omp.final.then: 19076 // CHECK19-NEXT: store i64 400, i64* [[IT]], align 8 19077 // CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]] 19078 // CHECK19: .omp.final.done: 19079 // CHECK19-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 19080 // CHECK19-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0 19081 // CHECK19-NEXT: br i1 [[TMP21]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]] 19082 // CHECK19: .omp.linear.pu: 19083 // CHECK19-NEXT: [[TMP22:%.*]] = load i32, i32* [[LIN2]], align 4 19084 // CHECK19-NEXT: store i32 [[TMP22]], i32* [[LIN_ADDR]], align 4 19085 // CHECK19-NEXT: [[TMP23:%.*]] = load i32, i32* [[A3]], align 4 19086 // CHECK19-NEXT: store i32 [[TMP23]], i32* [[A_ADDR]], align 4 19087 // CHECK19-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] 19088 // CHECK19: .omp.linear.pu.done: 19089 // CHECK19-NEXT: ret void 19090 // 19091 // 19092 // CHECK19-LABEL: define {{[^@]+}}@_Z7get_valv 19093 // CHECK19-SAME: () #[[ATTR3:[0-9]+]] { 19094 // CHECK19-NEXT: entry: 19095 // CHECK19-NEXT: ret i64 0 19096 // 19097 // 19098 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116 19099 // CHECK19-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]]) #[[ATTR0]] { 19100 // CHECK19-NEXT: entry: 19101 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 19102 // CHECK19-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 19103 // CHECK19-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 19104 // CHECK19-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 19105 // CHECK19-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 19106 // CHECK19-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 19107 // CHECK19-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* 19108 // CHECK19-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4 19109 // CHECK19-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4 19110 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4 19111 // CHECK19-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV]], align 4 19112 // CHECK19-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* 19113 // CHECK19-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2 19114 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4 19115 // CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]]) 19116 // CHECK19-NEXT: ret void 19117 // 19118 // 19119 // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..2 19120 // CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]]) #[[ATTR1]] { 19121 // CHECK19-NEXT: entry: 19122 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 19123 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 19124 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 19125 // CHECK19-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 19126 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 19127 // CHECK19-NEXT: [[TMP:%.*]] = alloca i16, align 2 19128 // CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 19129 // CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 19130 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 19131 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 19132 // CHECK19-NEXT: [[IT:%.*]] = alloca i16, align 2 19133 // CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 19134 // CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 19135 // CHECK19-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 19136 // CHECK19-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 19137 // CHECK19-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* 19138 // CHECK19-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 19139 // CHECK19-NEXT: store i32 3, i32* [[DOTOMP_UB]], align 4 19140 // CHECK19-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 19141 // CHECK19-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 19142 // CHECK19-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 19143 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 19144 // CHECK19-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 19145 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 19146 // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3 19147 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 19148 // CHECK19: cond.true: 19149 // CHECK19-NEXT: br label [[COND_END:%.*]] 19150 // CHECK19: cond.false: 19151 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 19152 // CHECK19-NEXT: br label [[COND_END]] 19153 // CHECK19: cond.end: 19154 // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 19155 // CHECK19-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 19156 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 19157 // CHECK19-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 19158 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 19159 // CHECK19: omp.inner.for.cond: 19160 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21 19161 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !21 19162 // CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 19163 // CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 19164 // CHECK19: omp.inner.for.body: 19165 // CHECK19-NEXT: [[TMP7:%.*]] = load i32, 
i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21 19166 // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4 19167 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]] 19168 // CHECK19-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD]] to i16 19169 // CHECK19-NEXT: store i16 [[CONV2]], i16* [[IT]], align 2, !llvm.access.group !21 19170 // CHECK19-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !21 19171 // CHECK19-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1 19172 // CHECK19-NEXT: store i32 [[ADD3]], i32* [[A_ADDR]], align 4, !llvm.access.group !21 19173 // CHECK19-NEXT: [[TMP9:%.*]] = load i16, i16* [[CONV]], align 4, !llvm.access.group !21 19174 // CHECK19-NEXT: [[CONV4:%.*]] = sext i16 [[TMP9]] to i32 19175 // CHECK19-NEXT: [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1 19176 // CHECK19-NEXT: [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16 19177 // CHECK19-NEXT: store i16 [[CONV6]], i16* [[CONV]], align 4, !llvm.access.group !21 19178 // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 19179 // CHECK19: omp.body.continue: 19180 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 19181 // CHECK19: omp.inner.for.inc: 19182 // CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21 19183 // CHECK19-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP10]], 1 19184 // CHECK19-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21 19185 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]] 19186 // CHECK19: omp.inner.for.end: 19187 // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 19188 // CHECK19: omp.loop.exit: 19189 // CHECK19-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 19190 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 19191 // CHECK19-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 19192 // CHECK19-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 19193 // CHECK19: .omp.final.then: 19194 // CHECK19-NEXT: store i16 22, i16* [[IT]], align 2 19195 // CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]] 19196 // CHECK19: .omp.final.done: 19197 // CHECK19-NEXT: ret void 19198 // 19199 // 19200 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140 19201 // CHECK19-SAME: (i32 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], double* nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 4 dereferenceable(12) [[D:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] { 19202 // CHECK19-NEXT: entry: 19203 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 19204 // CHECK19-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4 19205 // CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 19206 // CHECK19-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4 19207 // CHECK19-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4 19208 // CHECK19-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4 19209 // CHECK19-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4 19210 // CHECK19-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4 19211 // CHECK19-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4 19212 // CHECK19-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 19213 // CHECK19-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 19214 // CHECK19-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, 
align 4 19215 // CHECK19-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 19216 // CHECK19-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4 19217 // CHECK19-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 19218 // CHECK19-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4 19219 // CHECK19-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4 19220 // CHECK19-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4 19221 // CHECK19-NEXT: store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4 19222 // CHECK19-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4 19223 // CHECK19-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4 19224 // CHECK19-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 19225 // CHECK19-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4 19226 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 19227 // CHECK19-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4 19228 // CHECK19-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4 19229 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4 19230 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4 19231 // CHECK19-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4 19232 // CHECK19-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4 19233 // CHECK19-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4 19234 // CHECK19-NEXT: store i32 [[TMP8]], i32* [[A_CASTED]], align 4 19235 // CHECK19-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_CASTED]], align 4 19236 // CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 19237 // CHECK19-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 19238 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 19239 // CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x float]*, i32, float*, [5 x [10 x double]]*, i32, i32, double*, %struct.TT*, i32)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP9]], [10 x float]* [[TMP0]], i32 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i32 [[TMP11]]) 19240 // CHECK19-NEXT: ret void 19241 // 19242 // 19243 // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..3 19244 // CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], double* nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 4 dereferenceable(12) [[D:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] { 19245 // CHECK19-NEXT: entry: 19246 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 19247 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 19248 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 19249 // CHECK19-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4 19250 // CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 19251 // CHECK19-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4 19252 // CHECK19-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4 19253 // CHECK19-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4 19254 // CHECK19-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4 19255 // CHECK19-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4 19256 // CHECK19-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4 19257 // CHECK19-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 19258 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 19259 // CHECK19-NEXT: [[TMP:%.*]] = alloca i8, align 1 19260 // CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 19261 // CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 19262 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 19263 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 19264 // CHECK19-NEXT: [[IT:%.*]] = alloca i8, align 1 19265 // CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 19266 // CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 19267 // CHECK19-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 19268 // CHECK19-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4 19269 // CHECK19-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 19270 // CHECK19-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4 19271 // CHECK19-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4 19272 // CHECK19-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4 19273 // CHECK19-NEXT: store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4 19274 // CHECK19-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4 19275 // CHECK19-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4 19276 // CHECK19-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 19277 // CHECK19-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4 19278 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 19279 // CHECK19-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4 19280 // 
CHECK19-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4 19281 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4 19282 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4 19283 // CHECK19-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4 19284 // CHECK19-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4 19285 // CHECK19-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 19286 // CHECK19-NEXT: store i32 25, i32* [[DOTOMP_UB]], align 4 19287 // CHECK19-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 19288 // CHECK19-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 19289 // CHECK19-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 19290 // CHECK19-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 19291 // CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4 19292 // CHECK19-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]]) 19293 // CHECK19-NEXT: br label [[OMP_DISPATCH_COND:%.*]] 19294 // CHECK19: omp.dispatch.cond: 19295 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 19296 // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25 19297 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 19298 // CHECK19: cond.true: 19299 // CHECK19-NEXT: br label [[COND_END:%.*]] 19300 // CHECK19: cond.false: 19301 // CHECK19-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 19302 // CHECK19-NEXT: br label [[COND_END]] 19303 // CHECK19: cond.end: 19304 // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] 19305 // CHECK19-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 19306 // CHECK19-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 19307 // CHECK19-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4 19308 // CHECK19-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 19309 // CHECK19-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 19310 // CHECK19-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] 19311 // CHECK19-NEXT: br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] 19312 // CHECK19: omp.dispatch.body: 19313 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 19314 // CHECK19: omp.inner.for.cond: 19315 // CHECK19-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24 19316 // CHECK19-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !24 19317 // CHECK19-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]] 19318 // CHECK19-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 19319 // CHECK19: omp.inner.for.body: 19320 // CHECK19-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24 19321 // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1 19322 // CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 122, [[MUL]] 19323 // CHECK19-NEXT: [[CONV:%.*]] = trunc i32 [[SUB]] to i8 19324 // CHECK19-NEXT: store i8 [[CONV]], i8* [[IT]], align 1, !llvm.access.group !24 19325 // CHECK19-NEXT: [[TMP19:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !24 19326 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1 19327 // CHECK19-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4, 
!llvm.access.group !24 19328 // CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i32 0, i32 2 19329 // CHECK19-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !24 19330 // CHECK19-NEXT: [[CONV7:%.*]] = fpext float [[TMP20]] to double 19331 // CHECK19-NEXT: [[ADD8:%.*]] = fadd double [[CONV7]], 1.000000e+00 19332 // CHECK19-NEXT: [[CONV9:%.*]] = fptrunc double [[ADD8]] to float 19333 // CHECK19-NEXT: store float [[CONV9]], float* [[ARRAYIDX]], align 4, !llvm.access.group !24 19334 // CHECK19-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[TMP2]], i32 3 19335 // CHECK19-NEXT: [[TMP21:%.*]] = load float, float* [[ARRAYIDX10]], align 4, !llvm.access.group !24 19336 // CHECK19-NEXT: [[CONV11:%.*]] = fpext float [[TMP21]] to double 19337 // CHECK19-NEXT: [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00 19338 // CHECK19-NEXT: [[CONV13:%.*]] = fptrunc double [[ADD12]] to float 19339 // CHECK19-NEXT: store float [[CONV13]], float* [[ARRAYIDX10]], align 4, !llvm.access.group !24 19340 // CHECK19-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i32 0, i32 1 19341 // CHECK19-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX14]], i32 0, i32 2 19342 // CHECK19-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX15]], align 8, !llvm.access.group !24 19343 // CHECK19-NEXT: [[ADD16:%.*]] = fadd double [[TMP22]], 1.000000e+00 19344 // CHECK19-NEXT: store double [[ADD16]], double* [[ARRAYIDX15]], align 8, !llvm.access.group !24 19345 // CHECK19-NEXT: [[TMP23:%.*]] = mul nsw i32 1, [[TMP5]] 19346 // CHECK19-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds double, double* [[TMP6]], i32 [[TMP23]] 19347 // CHECK19-NEXT: [[ARRAYIDX18:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX17]], i32 3 19348 // CHECK19-NEXT: [[TMP24:%.*]] = load double, double* [[ARRAYIDX18]], align 8, !llvm.access.group !24 19349 // CHECK19-NEXT: [[ADD19:%.*]] = fadd double [[TMP24]], 1.000000e+00 19350 // CHECK19-NEXT: store double [[ADD19]], double* [[ARRAYIDX18]], align 8, !llvm.access.group !24 19351 // CHECK19-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0 19352 // CHECK19-NEXT: [[TMP25:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group !24 19353 // CHECK19-NEXT: [[ADD20:%.*]] = add nsw i64 [[TMP25]], 1 19354 // CHECK19-NEXT: store i64 [[ADD20]], i64* [[X]], align 4, !llvm.access.group !24 19355 // CHECK19-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1 19356 // CHECK19-NEXT: [[TMP26:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group !24 19357 // CHECK19-NEXT: [[CONV21:%.*]] = sext i8 [[TMP26]] to i32 19358 // CHECK19-NEXT: [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1 19359 // CHECK19-NEXT: [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8 19360 // CHECK19-NEXT: store i8 [[CONV23]], i8* [[Y]], align 4, !llvm.access.group !24 19361 // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 19362 // CHECK19: omp.body.continue: 19363 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 19364 // CHECK19: omp.inner.for.inc: 19365 // CHECK19-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24 19366 // CHECK19-NEXT: [[ADD24:%.*]] = add nsw i32 [[TMP27]], 1 19367 // CHECK19-NEXT: store i32 [[ADD24]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24 19368 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop 
[[LOOP25:![0-9]+]] 19369 // CHECK19: omp.inner.for.end: 19370 // CHECK19-NEXT: br label [[OMP_DISPATCH_INC:%.*]] 19371 // CHECK19: omp.dispatch.inc: 19372 // CHECK19-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 19373 // CHECK19-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 19374 // CHECK19-NEXT: [[ADD25:%.*]] = add nsw i32 [[TMP28]], [[TMP29]] 19375 // CHECK19-NEXT: store i32 [[ADD25]], i32* [[DOTOMP_LB]], align 4 19376 // CHECK19-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 19377 // CHECK19-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 19378 // CHECK19-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP30]], [[TMP31]] 19379 // CHECK19-NEXT: store i32 [[ADD26]], i32* [[DOTOMP_UB]], align 4 19380 // CHECK19-NEXT: br label [[OMP_DISPATCH_COND]] 19381 // CHECK19: omp.dispatch.end: 19382 // CHECK19-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]]) 19383 // CHECK19-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 19384 // CHECK19-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0 19385 // CHECK19-NEXT: br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 19386 // CHECK19: .omp.final.then: 19387 // CHECK19-NEXT: store i8 96, i8* [[IT]], align 1 19388 // CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]] 19389 // CHECK19: .omp.final.done: 19390 // CHECK19-NEXT: ret void 19391 // 19392 // 19393 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195 19394 // CHECK19-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] { 19395 // CHECK19-NEXT: entry: 19396 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 19397 // CHECK19-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 19398 // CHECK19-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4 19399 // CHECK19-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4 19400 // CHECK19-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 19401 // CHECK19-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 19402 // CHECK19-NEXT: [[AAA_CASTED:%.*]] = alloca i32, align 4 19403 // CHECK19-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 19404 // CHECK19-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 19405 // CHECK19-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4 19406 // CHECK19-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4 19407 // CHECK19-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* 19408 // CHECK19-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8* 19409 // CHECK19-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4 19410 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4 19411 // CHECK19-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4 19412 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4 19413 // CHECK19-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 4 19414 // CHECK19-NEXT: [[CONV2:%.*]] = bitcast i32* [[AA_CASTED]] to i16* 19415 // CHECK19-NEXT: store i16 [[TMP3]], i16* [[CONV2]], align 2 19416 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4 19417 // CHECK19-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV1]], align 4 19418 // CHECK19-NEXT: [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8* 19419 // CHECK19-NEXT: store i8 [[TMP5]], i8* [[CONV3]], align 1 19420 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4 19421 // CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK19-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK19-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK19-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
// CHECK19-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK19-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK19-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
// CHECK19-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK19-NEXT: ret void
//
//
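// Host wrapper for S1::r1 (source line 216): `this`, b (by value), the two
// VLA extents, and the i16* data pointer make up the 5 forked arguments.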
// CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216
// CHECK19-SAME: (%struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR0]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK19-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4
// CHECK19-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK19-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4
// CHECK19-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK19-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK19-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK19-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
// CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4
// CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4
// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]])
// CHECK19-NEXT: ret void
//
//
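// @.omp_outlined..5 below runs a 64-bit unsigned iteration space, hence
// __kmpc_for_static_init_8u and the ugt/ule comparisons. The checked bounds
// correspond to a loop of the shape (a sketch inferred from the IR, not the
// literal test source):
//   for (unsigned long long it = 2000; it > 600; it -= 400)
//     { s.a = b + 1.5; ++s.a; s.c[1][1] = (short)s.a; }
// four iterations (iv = 0..3), with it set to 400 in .omp.final.then.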
// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..5
// CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR1]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK19-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4
// CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK19-NEXT: [[TMP:%.*]] = alloca i64, align 4
// CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK19-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK19-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4
// CHECK19-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK19-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK19-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK19-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
// CHECK19-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK19-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK19-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK19-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK19-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK19-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK19-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK19-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP6]], 3
// CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK19: cond.true:
// CHECK19-NEXT: br label [[COND_END:%.*]]
// CHECK19: cond.false:
// CHECK19-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK19-NEXT: br label [[COND_END]]
// CHECK19: cond.end:
// CHECK19-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
// CHECK19-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK19-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK19-NEXT: store i64 [[TMP8]], i64* [[DOTOMP_IV]], align 8
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK19: omp.inner.for.cond:
// CHECK19-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !27
// CHECK19-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !27
// CHECK19-NEXT: [[CMP3:%.*]] = icmp ule i64 [[TMP9]], [[TMP10]]
// CHECK19-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK19: omp.inner.for.body:
// CHECK19-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !27
// CHECK19-NEXT: [[MUL:%.*]] = mul i64 [[TMP11]], 400
// CHECK19-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK19-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !27
// CHECK19-NEXT: [[TMP12:%.*]] = load i32, i32* [[B_ADDR]], align 4, !llvm.access.group !27
// CHECK19-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP12]] to double
// CHECK19-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK19-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK19-NEXT: store double [[ADD]], double* [[A]], align 4, !llvm.access.group !27
// CHECK19-NEXT: [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK19-NEXT: [[TMP13:%.*]] = load double, double* [[A4]], align 4, !llvm.access.group !27
// CHECK19-NEXT: [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00
// CHECK19-NEXT: store double [[INC]], double* [[A4]], align 4, !llvm.access.group !27
// CHECK19-NEXT: [[CONV5:%.*]] = fptosi double [[INC]] to i16
// CHECK19-NEXT: [[TMP14:%.*]] = mul nsw i32 1, [[TMP2]]
// CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP14]]
// CHECK19-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
// CHECK19-NEXT: store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2, !llvm.access.group !27
// CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK19: omp.body.continue:
// CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK19: omp.inner.for.inc:
// CHECK19-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !27
// CHECK19-NEXT: [[ADD7:%.*]] = add i64 [[TMP15]], 1
// CHECK19-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !27
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]]
// CHECK19: omp.inner.for.end:
// CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK19: omp.loop.exit:
// CHECK19-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
// CHECK19-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK19-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
// CHECK19-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK19: .omp.final.then:
// CHECK19-NEXT: store i64 400, i64* [[IT]], align 8
// CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK19: .omp.final.done:
// CHECK19-NEXT: ret void
//
//
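// Host wrapper for ftemplate<int> (source line 178): a and aa travel by
// value; b is passed as a [10 x i32]* and forwarded unchanged.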
// CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178
// CHECK19-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK19-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK19-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK19-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK19-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK19-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK19-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK19-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK19-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2
// CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]])
// CHECK19-NEXT: ret void
//
//
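// @.omp_outlined..6 below iterates a signed 64-bit space. From the checked
// bounds, the loop has the shape (a sketch inferred from the IR, not the
// literal test source):
//   for (long long i = -10; i < 11; i += 3) { ++a; ++aa; ++b[2]; }
// seven iterations (iv = 0..6), with i set to 11 in .omp.final.then.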
// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..6
// CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK19-NEXT: [[TMP:%.*]] = alloca i64, align 4
// CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[I:%.*]] = alloca i64, align 8
// CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK19-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK19-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK19-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK19-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK19-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK19-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK19-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8
// CHECK19-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK19-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK19-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK19-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6
// CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK19: cond.true:
// CHECK19-NEXT: br label [[COND_END:%.*]]
// CHECK19: cond.false:
// CHECK19-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK19-NEXT: br label [[COND_END]]
// CHECK19: cond.end:
// CHECK19-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK19-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK19-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK19-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK19: omp.inner.for.cond:
// CHECK19-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30
// CHECK19-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !30
// CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]]
// CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK19: omp.inner.for.body:
// CHECK19-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30
// CHECK19-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3
// CHECK19-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
// CHECK19-NEXT: store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !30
// CHECK19-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !30
// CHECK19-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK19-NEXT: store i32 [[ADD2]], i32* [[A_ADDR]], align 4, !llvm.access.group !30
// CHECK19-NEXT: [[TMP10:%.*]] = load i16, i16* [[CONV]], align 4, !llvm.access.group !30
// CHECK19-NEXT: [[CONV3:%.*]] = sext i16 [[TMP10]] to i32
// CHECK19-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
// CHECK19-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16
// CHECK19-NEXT: store i16 [[CONV5]], i16* [[CONV]], align 4, !llvm.access.group !30
// CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
// CHECK19-NEXT: [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !30
// CHECK19-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK19-NEXT: store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !30
// CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK19: omp.body.continue:
// CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK19: omp.inner.for.inc:
// CHECK19-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30
// CHECK19-NEXT: [[ADD7:%.*]] = add nsw i64 [[TMP12]], 1
// CHECK19-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]]
// CHECK19: omp.inner.for.end:
// CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK19: omp.loop.exit:
// CHECK19-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK19-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK19-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
// CHECK19-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK19: .omp.final.then:
// CHECK19-NEXT: store i64 11, i64* [[I]], align 8
// CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK19: .omp.final.done:
// CHECK19-NEXT: ret void
//
//
// CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96
// CHECK20-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK20-NEXT: ret void
//
//
// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK20-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK20-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK20-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK20-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK20-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5
// CHECK20-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK20: cond.true:
// CHECK20-NEXT: br label [[COND_END:%.*]]
// CHECK20: cond.false:
// CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: br label [[COND_END]]
// CHECK20: cond.end:
// CHECK20-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK20-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK20: omp.inner.for.cond:
// CHECK20-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
// CHECK20-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !12
// CHECK20-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK20-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK20: omp.inner.for.body:
// CHECK20-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
// CHECK20-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
// CHECK20-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]]
// CHECK20-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !12
// CHECK20-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK20: omp.body.continue:
// CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK20: omp.inner.for.inc:
// CHECK20-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
// CHECK20-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK20-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
// CHECK20-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]]
// CHECK20: omp.inner.for.end:
// CHECK20-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK20: omp.loop.exit:
// CHECK20-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK20-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK20-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
// CHECK20-NEXT: br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK20: .omp.final.then:
// CHECK20-NEXT: store i32 33, i32* [[I]], align 4
// CHECK20-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK20: .omp.final.done:
// CHECK20-NEXT: ret void
//
//
// CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108
// CHECK20-SAME: (i32 [[AA:%.*]], i32 [[LIN:%.*]], i32 [[A:%.*]]) #[[ATTR0]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[LIN_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[LIN_CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK20-NEXT: store i32 [[LIN]], i32* [[LIN_ADDR]], align 4
// CHECK20-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK20-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK20-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK20-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK20-NEXT: store i16 [[TMP0]], i16* [[CONV1]], align 2
// CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[LIN_ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP2]], i32* [[LIN_CASTED]], align 4
// CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[LIN_CASTED]], align 4
// CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP4]], i32* [[A_CASTED]], align 4
// CHECK20-NEXT: [[TMP5:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]])
// CHECK20-NEXT: ret void
//
//
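// @.omp_outlined..1 below checks the linear(...) lowering: the starts of lin
// and a are snapshotted into the DOTLINEAR_START* slots, the step comes from
// a call to _Z7get_valv, a barrier precedes the static init, and on the last
// iteration the privatized lin2/a3 values are copied back in .omp.linear.pu.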
// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[AA:%.*]], i32 [[LIN:%.*]], i32 [[A:%.*]]) #[[ATTR1]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[LIN_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK20-NEXT: [[TMP:%.*]] = alloca i64, align 4
// CHECK20-NEXT: [[DOTLINEAR_START:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTLINEAR_START1:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
// CHECK20-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK20-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK20-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK20-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK20-NEXT: [[LIN2:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[A3:%.*]] = alloca i32, align 4
// CHECK20-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK20-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK20-NEXT: store i32 [[LIN]], i32* [[LIN_ADDR]], align 4
// CHECK20-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK20-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK20-NEXT: [[TMP0:%.*]] = load i32, i32* [[LIN_ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4
// CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP1]], i32* [[DOTLINEAR_START1]], align 4
// CHECK20-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv() #[[ATTR5:[0-9]+]]
// CHECK20-NEXT: store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8
// CHECK20-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK20-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK20-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK20-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK20-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK20-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP3]])
// CHECK20-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK20-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK20-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3
// CHECK20-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK20: cond.true:
// CHECK20-NEXT: br label [[COND_END:%.*]]
// CHECK20: cond.false:
// CHECK20-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK20-NEXT: br label [[COND_END]]
// CHECK20: cond.end:
// CHECK20-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK20-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK20-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK20-NEXT: store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8
// CHECK20-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK20: omp.inner.for.cond:
// CHECK20-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK20-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !18
// CHECK20-NEXT: [[CMP4:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]]
// CHECK20-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK20: omp.inner.for.body:
// CHECK20-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK20-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400
// CHECK20-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK20-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !18
// CHECK20-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4, !llvm.access.group !18
// CHECK20-NEXT: [[CONV5:%.*]] = sext i32 [[TMP10]] to i64
// CHECK20-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK20-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !18
// CHECK20-NEXT: [[MUL6:%.*]] = mul i64 [[TMP11]], [[TMP12]]
// CHECK20-NEXT: [[ADD:%.*]] = add i64 [[CONV5]], [[MUL6]]
// CHECK20-NEXT: [[CONV7:%.*]] = trunc i64 [[ADD]] to i32
// CHECK20-NEXT: store i32 [[CONV7]], i32* [[LIN2]], align 4, !llvm.access.group !18
// CHECK20-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START1]], align 4, !llvm.access.group !18
// CHECK20-NEXT: [[CONV8:%.*]] = sext i32 [[TMP13]] to i64
// CHECK20-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK20-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !18
// CHECK20-NEXT: [[MUL9:%.*]] = mul i64 [[TMP14]], [[TMP15]]
// CHECK20-NEXT: [[ADD10:%.*]] = add i64 [[CONV8]], [[MUL9]]
// CHECK20-NEXT: [[CONV11:%.*]] = trunc i64 [[ADD10]] to i32
// CHECK20-NEXT: store i32 [[CONV11]], i32* [[A3]], align 4, !llvm.access.group !18
// CHECK20-NEXT: [[TMP16:%.*]] = load i16, i16* [[CONV]], align 4, !llvm.access.group !18
// CHECK20-NEXT: [[CONV12:%.*]] = sext i16 [[TMP16]] to i32
// CHECK20-NEXT: [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1
// CHECK20-NEXT: [[CONV14:%.*]] = trunc i32 [[ADD13]] to i16
// CHECK20-NEXT: store i16 [[CONV14]], i16* [[CONV]], align 4, !llvm.access.group !18
// CHECK20-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK20: omp.body.continue:
// CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK20: omp.inner.for.inc:
// CHECK20-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK20-NEXT: [[ADD15:%.*]] = add i64 [[TMP17]], 1
// CHECK20-NEXT: store i64 [[ADD15]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK20-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
// CHECK20: omp.inner.for.end:
// CHECK20-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK20: omp.loop.exit:
// CHECK20-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
// CHECK20-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK20-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
// CHECK20-NEXT: br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK20: .omp.final.then:
// CHECK20-NEXT: store i64 400, i64* [[IT]], align 8
// CHECK20-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK20: .omp.final.done:
// CHECK20-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK20-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0
// CHECK20-NEXT: br i1 [[TMP21]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
// CHECK20: .omp.linear.pu:
// CHECK20-NEXT: [[TMP22:%.*]] = load i32, i32* [[LIN2]], align 4
// CHECK20-NEXT: store i32 [[TMP22]], i32* [[LIN_ADDR]], align 4
// CHECK20-NEXT: [[TMP23:%.*]] = load i32, i32* [[A3]], align 4
// CHECK20-NEXT: store i32 [[TMP23]], i32* [[A_ADDR]], align 4
// CHECK20-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]]
// CHECK20: .omp.linear.pu.done:
// CHECK20-NEXT: ret void
//
//
// CHECK20-LABEL: define {{[^@]+}}@_Z7get_valv
// CHECK20-SAME: () #[[ATTR3:[0-9]+]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: ret i64 0
//
//
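// Host wrapper for the l116 region: a and aa (via its i16 view) are
// forwarded by value as the 2 fork-call arguments.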
// CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116
// CHECK20-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]]) #[[ATTR0]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK20-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK20-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK20-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4
// CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK20-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK20-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK20-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2
// CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]])
// CHECK20-NEXT: ret void
//
//
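// @.omp_outlined..2 below uses an i16 loop variable. The checked bounds give
// a loop of the shape (a sketch inferred from the IR, not the literal source):
//   for (short it = 6; it < 22; it += 4) { ++a; ++aa; }
// four iterations (iv = 0..3), with it set to 22 in .omp.final.then.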
// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]]) #[[ATTR1]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[TMP:%.*]] = alloca i16, align 2
// CHECK20-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[IT:%.*]] = alloca i16, align 2
// CHECK20-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK20-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK20-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK20-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK20-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT: store i32 3, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK20-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK20-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK20-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3
// CHECK20-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK20: cond.true:
// CHECK20-NEXT: br label [[COND_END:%.*]]
// CHECK20: cond.false:
// CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: br label [[COND_END]]
// CHECK20: cond.end:
// CHECK20-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK20-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK20: omp.inner.for.cond:
// CHECK20-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
// CHECK20-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !21
// CHECK20-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK20-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK20: omp.inner.for.body:
// CHECK20-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
// CHECK20-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4
// CHECK20-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]]
// CHECK20-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD]] to i16
// CHECK20-NEXT: store i16 [[CONV2]], i16* [[IT]], align 2, !llvm.access.group !21
// CHECK20-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !21
// CHECK20-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK20-NEXT: store i32 [[ADD3]], i32* [[A_ADDR]], align 4, !llvm.access.group !21
// CHECK20-NEXT: [[TMP9:%.*]] = load i16, i16* [[CONV]], align 4, !llvm.access.group !21
// CHECK20-NEXT: [[CONV4:%.*]] = sext i16 [[TMP9]] to i32
// CHECK20-NEXT: [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
// CHECK20-NEXT: [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
// CHECK20-NEXT: store i16 [[CONV6]], i16* [[CONV]], align 4, !llvm.access.group !21
// CHECK20-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK20: omp.body.continue:
// CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK20: omp.inner.for.inc:
// CHECK20-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
// CHECK20-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK20-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
// CHECK20-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]]
// CHECK20: omp.inner.for.end:
// CHECK20-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK20: omp.loop.exit:
// CHECK20-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK20-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK20-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
// CHECK20-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK20: .omp.final.then:
// CHECK20-NEXT: store i16 22, i16* [[IT]], align 2
// CHECK20-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK20: .omp.final.done:
// CHECK20-NEXT: ret void
//
//
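// Host wrapper for the l140 region: 10 fork-call arguments, including two
// array pointers, three VLA extents, the two VLA data pointers, the TT
// struct pointer, and the captured chunk-size expression.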
// CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140
// CHECK20-SAME: (i32 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], double* nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 4 dereferenceable(12) [[D:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
// CHECK20-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4
// CHECK20-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
// CHECK20-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4
// CHECK20-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK20-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
// CHECK20-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK20-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4
// CHECK20-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK20-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK20-NEXT: store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
// CHECK20-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4
// CHECK20-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
// CHECK20-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK20-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
// CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK20-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
// CHECK20-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK20-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
// CHECK20-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
// CHECK20-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
// CHECK20-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP8]], i32* [[A_CASTED]], align 4
// CHECK20-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK20-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK20-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x float]*, i32, float*, [5 x [10 x double]]*, i32, i32, double*, %struct.TT*, i32)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP9]], [10 x float]* [[TMP0]], i32 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i32 [[TMP11]])
// CHECK20-NEXT: ret void
//
//
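// @.omp_outlined..3 below is the chunked static case: schedule type 33
// (static chunked) with the captured expression as chunk size, so the body
// sits inside an omp.dispatch.cond/omp.dispatch.inc loop. The i8 loop
// variable descends from 122 (a sketch inferred from the IR):
//   for (char it = 122; it > 96; --it) { /* updates a, b, bn, c, cn, d */ }
// with it set to 96 in .omp.final.then.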
// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], double* nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 4 dereferenceable(12) [[D:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
// CHECK20-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4
// CHECK20-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
// CHECK20-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4
// CHECK20-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[TMP:%.*]] = alloca i8, align 1
// CHECK20-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[IT:%.*]] = alloca i8, align 1
// CHECK20-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK20-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK20-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
// CHECK20-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK20-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4
// CHECK20-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK20-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK20-NEXT: store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
// CHECK20-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4
// CHECK20-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
// CHECK20-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK20-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
// CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK20-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
// CHECK20-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK20-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
// CHECK20-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
// CHECK20-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
// CHECK20-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT: store i32 25, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK20-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK20-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK20-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
// CHECK20-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]])
// CHECK20-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK20: omp.dispatch.cond:
// CHECK20-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25
// CHECK20-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK20: cond.true:
// CHECK20-NEXT: br label [[COND_END:%.*]]
// CHECK20: cond.false:
// CHECK20-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: br label [[COND_END]]
// CHECK20: cond.end:
// CHECK20-NEXT: [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK20-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
// CHECK20-NEXT: br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK20: omp.dispatch.body:
// CHECK20-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK20: omp.inner.for.cond:
// CHECK20-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
// CHECK20-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !24
// CHECK20-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
// CHECK20-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK20: omp.inner.for.body:
// CHECK20-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
// CHECK20-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
// CHECK20-NEXT: [[SUB:%.*]] = sub nsw i32 122, [[MUL]]
// CHECK20-NEXT: [[CONV:%.*]] = trunc i32 [[SUB]] to i8
// CHECK20-NEXT: store i8 [[CONV]], i8* [[IT]], align 1, !llvm.access.group !24
// CHECK20-NEXT: [[TMP19:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !24
// CHECK20-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK20-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4, !llvm.access.group !24
// CHECK20-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i32 0, i32 2
// CHECK20-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !24
// CHECK20-NEXT: [[CONV7:%.*]] = fpext float [[TMP20]] to double
// CHECK20-NEXT: [[ADD8:%.*]] = fadd double [[CONV7]], 1.000000e+00
// CHECK20-NEXT: [[CONV9:%.*]] = fptrunc double [[ADD8]] to float
// CHECK20-NEXT: store float [[CONV9]], float* [[ARRAYIDX]], align 4, !llvm.access.group !24
// CHECK20-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[TMP2]], i32 3
// CHECK20-NEXT: [[TMP21:%.*]] = load float, float* [[ARRAYIDX10]], align 4, !llvm.access.group !24
// CHECK20-NEXT: [[CONV11:%.*]] = fpext float [[TMP21]] to double
// CHECK20-NEXT: [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00
// CHECK20-NEXT: [[CONV13:%.*]] = fptrunc double [[ADD12]] to float
// CHECK20-NEXT: store float [[CONV13]], float* [[ARRAYIDX10]], align 4, !llvm.access.group !24
// CHECK20-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i32 0, i32 1
// CHECK20-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX14]], i32 0, i32 2
// CHECK20-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX15]], align 8, !llvm.access.group !24
// CHECK20-NEXT: [[ADD16:%.*]] = fadd double [[TMP22]], 1.000000e+00
// CHECK20-NEXT: store double [[ADD16]], double* [[ARRAYIDX15]], align 8, !llvm.access.group !24
// CHECK20-NEXT: [[TMP23:%.*]] = mul nsw i32 1, [[TMP5]]
// CHECK20-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds double, double* [[TMP6]], i32 [[TMP23]]
// CHECK20-NEXT: [[ARRAYIDX18:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX17]], i32 3
// CHECK20-NEXT: [[TMP24:%.*]] = load double, double* [[ARRAYIDX18]], align 8, !llvm.access.group !24
// CHECK20-NEXT: [[ADD19:%.*]] = fadd double [[TMP24]], 1.000000e+00
// CHECK20-NEXT: store double [[ADD19]], double* [[ARRAYIDX18]], align 8, !llvm.access.group !24
// CHECK20-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
// CHECK20-NEXT: [[TMP25:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group !24
// CHECK20-NEXT: [[ADD20:%.*]] = add nsw i64 [[TMP25]], 1
// CHECK20-NEXT: store i64 [[ADD20]], i64* [[X]], align 4, !llvm.access.group !24
// CHECK20-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
// CHECK20-NEXT: [[TMP26:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group !24
// CHECK20-NEXT: [[CONV21:%.*]] = sext i8 [[TMP26]] to i32
// CHECK20-NEXT: [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1
// CHECK20-NEXT: [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8
// CHECK20-NEXT: store i8 [[CONV23]], i8* [[Y]], align 4, !llvm.access.group !24
// CHECK20-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK20: omp.body.continue:
// CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK20: omp.inner.for.inc:
// CHECK20-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
// CHECK20-NEXT: [[ADD24:%.*]] = add nsw i32 [[TMP27]], 1
// CHECK20-NEXT: store i32 [[ADD24]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
// CHECK20-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
// CHECK20: omp.inner.for.end:
// CHECK20-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK20: omp.dispatch.inc:
// CHECK20-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK20-NEXT: [[ADD25:%.*]] = add nsw i32 [[TMP28]], [[TMP29]]
// CHECK20-NEXT: store i32 [[ADD25]], i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK20-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP30]], [[TMP31]]
// CHECK20-NEXT: store i32 [[ADD26]], i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK20: omp.dispatch.end:
// CHECK20-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]])
// CHECK20-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK20-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0
// CHECK20-NEXT: br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK20: .omp.final.then:
// CHECK20-NEXT: store i8 96, i8* [[IT]], align 1
// CHECK20-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK20: .omp.final.done:
// CHECK20-NEXT: ret void
//
//
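// The remaining CHECK20 blocks (fstatic l195, .omp_outlined..4, S1::r1 l216,
// .omp_outlined..5) repeat the corresponding CHECK19 assertions above line
// for line, modulo the check prefix.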
// CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195
// CHECK20-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK20-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[AAA_CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK20-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK20-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
// CHECK20-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK20-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK20-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
// CHECK20-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4
// CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK20-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK20-NEXT: [[CONV2:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK20-NEXT: store i16 [[TMP3]], i16* [[CONV2]], align 2
// CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK20-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV1]], align 4
// CHECK20-NEXT: [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8*
// CHECK20-NEXT: store i8 [[TMP5]], i8* [[CONV3]], align 1
// CHECK20-NEXT: [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4
// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]])
// CHECK20-NEXT: ret void
//
//
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]]) 20227 // CHECK20-NEXT: ret void 20228 // 20229 // 20230 // CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..4 20231 // CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] { 20232 // CHECK20-NEXT: entry: 20233 // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 20234 // CHECK20-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 20235 // CHECK20-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 20236 // CHECK20-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 20237 // CHECK20-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4 20238 // CHECK20-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4 20239 // CHECK20-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 20240 // CHECK20-NEXT: [[TMP:%.*]] = alloca i32, align 4 20241 // CHECK20-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 20242 // CHECK20-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 20243 // CHECK20-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 20244 // CHECK20-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 20245 // CHECK20-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4 20246 // CHECK20-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4 20247 // CHECK20-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* 20248 // CHECK20-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8* 20249 // CHECK20-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4 20250 // CHECK20-NEXT: ret void 20251 // 20252 // 20253 // CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216 20254 // CHECK20-SAME: (%struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR0]] { 20255 // CHECK20-NEXT: entry: 20256 // CHECK20-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 20257 // CHECK20-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 20258 // CHECK20-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 20259 // CHECK20-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4 20260 // CHECK20-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4 20261 // CHECK20-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4 20262 // CHECK20-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 20263 // CHECK20-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 20264 // CHECK20-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 20265 // CHECK20-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4 20266 // CHECK20-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4 20267 // CHECK20-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 20268 // CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 20269 // CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4 20270 // CHECK20-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4 20271 // CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4 20272 // CHECK20-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4 20273 // CHECK20-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4 20274 // CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]]) 20275 // CHECK20-NEXT: ret void 20276 // 20277 // 20278 // CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..5 20279 // CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR1]] { 20280 // CHECK20-NEXT: entry: 20281 // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 20282 // CHECK20-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 20283 // CHECK20-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 20284 // CHECK20-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 20285 // CHECK20-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 20286 // CHECK20-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4 20287 // CHECK20-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4 20288 // CHECK20-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 20289 // CHECK20-NEXT: [[TMP:%.*]] = alloca i64, align 4 20290 // CHECK20-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 20291 // CHECK20-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 20292 // CHECK20-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 20293 // CHECK20-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 20294 // CHECK20-NEXT: [[IT:%.*]] = alloca i64, align 8 20295 // CHECK20-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 20296 // CHECK20-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 20297 // CHECK20-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 20298 // CHECK20-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 20299 // CHECK20-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 20300 // CHECK20-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4 20301 // CHECK20-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4 20302 // CHECK20-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 20303 // CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 20304 // CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4 20305 // CHECK20-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4 20306 // CHECK20-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 20307 // CHECK20-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8 20308 // CHECK20-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 20309 // CHECK20-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 20310 // CHECK20-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 20311 // CHECK20-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4 20312 // CHECK20-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) 20313 // CHECK20-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 20314 // CHECK20-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP6]], 3 20315 // CHECK20-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 20316 // CHECK20: cond.true: 20317 // CHECK20-NEXT: br label [[COND_END:%.*]] 20318 // CHECK20: cond.false: 20319 // CHECK20-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 20320 // CHECK20-NEXT: br label [[COND_END]] 20321 // CHECK20: cond.end: 20322 // CHECK20-NEXT: 
[[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] 20323 // CHECK20-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 20324 // CHECK20-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 20325 // CHECK20-NEXT: store i64 [[TMP8]], i64* [[DOTOMP_IV]], align 8 20326 // CHECK20-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 20327 // CHECK20: omp.inner.for.cond: 20328 // CHECK20-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !27 20329 // CHECK20-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !27 20330 // CHECK20-NEXT: [[CMP3:%.*]] = icmp ule i64 [[TMP9]], [[TMP10]] 20331 // CHECK20-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 20332 // CHECK20: omp.inner.for.body: 20333 // CHECK20-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !27 20334 // CHECK20-NEXT: [[MUL:%.*]] = mul i64 [[TMP11]], 400 20335 // CHECK20-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] 20336 // CHECK20-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !27 20337 // CHECK20-NEXT: [[TMP12:%.*]] = load i32, i32* [[B_ADDR]], align 4, !llvm.access.group !27 20338 // CHECK20-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP12]] to double 20339 // CHECK20-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 20340 // CHECK20-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0 20341 // CHECK20-NEXT: store double [[ADD]], double* [[A]], align 4, !llvm.access.group !27 20342 // CHECK20-NEXT: [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0 20343 // CHECK20-NEXT: [[TMP13:%.*]] = load double, double* [[A4]], align 4, !llvm.access.group !27 20344 // CHECK20-NEXT: [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00 20345 // CHECK20-NEXT: store double [[INC]], double* [[A4]], align 4, !llvm.access.group !27 20346 // CHECK20-NEXT: [[CONV5:%.*]] = fptosi double [[INC]] to i16 20347 // CHECK20-NEXT: [[TMP14:%.*]] = mul nsw i32 1, [[TMP2]] 20348 // CHECK20-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP14]] 20349 // CHECK20-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1 20350 // CHECK20-NEXT: store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2, !llvm.access.group !27 20351 // CHECK20-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 20352 // CHECK20: omp.body.continue: 20353 // CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 20354 // CHECK20: omp.inner.for.inc: 20355 // CHECK20-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !27 20356 // CHECK20-NEXT: [[ADD7:%.*]] = add i64 [[TMP15]], 1 20357 // CHECK20-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !27 20358 // CHECK20-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]] 20359 // CHECK20: omp.inner.for.end: 20360 // CHECK20-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 20361 // CHECK20: omp.loop.exit: 20362 // CHECK20-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]]) 20363 // CHECK20-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 20364 // CHECK20-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 20365 // CHECK20-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 20366 // CHECK20: .omp.final.then: 20367 // CHECK20-NEXT: store i64 400, i64* [[IT]], align 8 20368 // CHECK20-NEXT: br label [[DOTOMP_FINAL_DONE]] 20369 // CHECK20: .omp.final.done: 20370 // 
CHECK20-NEXT: ret void 20371 // 20372 // 20373 // CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178 20374 // CHECK20-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] { 20375 // CHECK20-NEXT: entry: 20376 // CHECK20-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 20377 // CHECK20-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 20378 // CHECK20-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4 20379 // CHECK20-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 20380 // CHECK20-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 20381 // CHECK20-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 20382 // CHECK20-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 20383 // CHECK20-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4 20384 // CHECK20-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* 20385 // CHECK20-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4 20386 // CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4 20387 // CHECK20-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4 20388 // CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4 20389 // CHECK20-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 4 20390 // CHECK20-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* 20391 // CHECK20-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2 20392 // CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4 20393 // CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) 20394 // CHECK20-NEXT: ret void 20395 // 20396 // 20397 // CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..6 20398 // CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] { 20399 // CHECK20-NEXT: entry: 20400 // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 20401 // CHECK20-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 20402 // CHECK20-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 20403 // CHECK20-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 20404 // CHECK20-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4 20405 // CHECK20-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 20406 // CHECK20-NEXT: [[TMP:%.*]] = alloca i64, align 4 20407 // CHECK20-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 20408 // CHECK20-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 20409 // CHECK20-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 20410 // CHECK20-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 20411 // CHECK20-NEXT: [[I:%.*]] = alloca i64, align 8 20412 // CHECK20-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 20413 // CHECK20-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 20414 // CHECK20-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 20415 // CHECK20-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 20416 // CHECK20-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4 20417 // CHECK20-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* 20418 // CHECK20-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4 20419 // CHECK20-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 20420 // 
CHECK20-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8 20421 // CHECK20-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 20422 // CHECK20-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 20423 // CHECK20-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 20424 // CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 20425 // CHECK20-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) 20426 // CHECK20-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 20427 // CHECK20-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6 20428 // CHECK20-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 20429 // CHECK20: cond.true: 20430 // CHECK20-NEXT: br label [[COND_END:%.*]] 20431 // CHECK20: cond.false: 20432 // CHECK20-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 20433 // CHECK20-NEXT: br label [[COND_END]] 20434 // CHECK20: cond.end: 20435 // CHECK20-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 20436 // CHECK20-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 20437 // CHECK20-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 20438 // CHECK20-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8 20439 // CHECK20-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 20440 // CHECK20: omp.inner.for.cond: 20441 // CHECK20-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30 20442 // CHECK20-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !30 20443 // CHECK20-NEXT: [[CMP1:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]] 20444 // CHECK20-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 20445 // CHECK20: omp.inner.for.body: 20446 // CHECK20-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30 20447 // CHECK20-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3 20448 // CHECK20-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]] 20449 // CHECK20-NEXT: store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !30 20450 // CHECK20-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !30 20451 // CHECK20-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1 20452 // CHECK20-NEXT: store i32 [[ADD2]], i32* [[A_ADDR]], align 4, !llvm.access.group !30 20453 // CHECK20-NEXT: [[TMP10:%.*]] = load i16, i16* [[CONV]], align 4, !llvm.access.group !30 20454 // CHECK20-NEXT: [[CONV3:%.*]] = sext i16 [[TMP10]] to i32 20455 // CHECK20-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1 20456 // CHECK20-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16 20457 // CHECK20-NEXT: store i16 [[CONV5]], i16* [[CONV]], align 4, !llvm.access.group !30 20458 // CHECK20-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2 20459 // CHECK20-NEXT: [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !30 20460 // CHECK20-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP11]], 1 20461 // CHECK20-NEXT: store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !30 20462 // CHECK20-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 20463 // CHECK20: omp.body.continue: 20464 // CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 20465 // CHECK20: omp.inner.for.inc: 20466 // CHECK20-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30 20467 // CHECK20-NEXT: [[ADD7:%.*]] = add nsw i64 [[TMP12]], 1 20468 // 
CHECK20-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30 20469 // CHECK20-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]] 20470 // CHECK20: omp.inner.for.end: 20471 // CHECK20-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 20472 // CHECK20: omp.loop.exit: 20473 // CHECK20-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 20474 // CHECK20-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 20475 // CHECK20-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 20476 // CHECK20-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 20477 // CHECK20: .omp.final.then: 20478 // CHECK20-NEXT: store i64 11, i64* [[I]], align 8 20479 // CHECK20-NEXT: br label [[DOTOMP_FINAL_DONE]] 20480 // CHECK20: .omp.final.done: 20481 // CHECK20-NEXT: ret void 20482 // 20483 // 20484 // CHECK21-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96 20485 // CHECK21-SAME: () #[[ATTR0:[0-9]+]] { 20486 // CHECK21-NEXT: entry: 20487 // CHECK21-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*)) 20488 // CHECK21-NEXT: ret void 20489 // 20490 // 20491 // CHECK21-LABEL: define {{[^@]+}}@.omp_outlined. 20492 // CHECK21-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] { 20493 // CHECK21-NEXT: entry: 20494 // CHECK21-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 20495 // CHECK21-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 20496 // CHECK21-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 20497 // CHECK21-NEXT: [[TMP:%.*]] = alloca i32, align 4 20498 // CHECK21-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 20499 // CHECK21-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 20500 // CHECK21-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 20501 // CHECK21-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 20502 // CHECK21-NEXT: [[I:%.*]] = alloca i32, align 4 20503 // CHECK21-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 20504 // CHECK21-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 20505 // CHECK21-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 20506 // CHECK21-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4 20507 // CHECK21-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 20508 // CHECK21-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 20509 // CHECK21-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 20510 // CHECK21-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 20511 // CHECK21-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 20512 // CHECK21-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 20513 // CHECK21-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5 20514 // CHECK21-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 20515 // CHECK21: cond.true: 20516 // CHECK21-NEXT: br label [[COND_END:%.*]] 20517 // CHECK21: cond.false: 20518 // CHECK21-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 20519 // CHECK21-NEXT: br label [[COND_END]] 20520 // CHECK21: cond.end: 20521 // CHECK21-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 20522 // 
CHECK21-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 20523 // CHECK21-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 20524 // CHECK21-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 20525 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 20526 // CHECK21: omp.inner.for.cond: 20527 // CHECK21-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11 20528 // CHECK21-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !11 20529 // CHECK21-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 20530 // CHECK21-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 20531 // CHECK21: omp.inner.for.body: 20532 // CHECK21-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11 20533 // CHECK21-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5 20534 // CHECK21-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]] 20535 // CHECK21-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !11 20536 // CHECK21-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 20537 // CHECK21: omp.body.continue: 20538 // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 20539 // CHECK21: omp.inner.for.inc: 20540 // CHECK21-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11 20541 // CHECK21-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 20542 // CHECK21-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11 20543 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]] 20544 // CHECK21: omp.inner.for.end: 20545 // CHECK21-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 20546 // CHECK21: omp.loop.exit: 20547 // CHECK21-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 20548 // CHECK21-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 20549 // CHECK21-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0 20550 // CHECK21-NEXT: br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 20551 // CHECK21: .omp.final.then: 20552 // CHECK21-NEXT: store i32 33, i32* [[I]], align 4 20553 // CHECK21-NEXT: br label [[DOTOMP_FINAL_DONE]] 20554 // CHECK21: .omp.final.done: 20555 // CHECK21-NEXT: ret void 20556 // 20557 // 20558 // CHECK21-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108 20559 // CHECK21-SAME: (i64 [[AA:%.*]], i64 [[LIN:%.*]], i64 [[A:%.*]]) #[[ATTR0]] { 20560 // CHECK21-NEXT: entry: 20561 // CHECK21-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 20562 // CHECK21-NEXT: [[LIN_ADDR:%.*]] = alloca i64, align 8 20563 // CHECK21-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 20564 // CHECK21-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 20565 // CHECK21-NEXT: [[LIN_CASTED:%.*]] = alloca i64, align 8 20566 // CHECK21-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 20567 // CHECK21-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 20568 // CHECK21-NEXT: store i64 [[LIN]], i64* [[LIN_ADDR]], align 8 20569 // CHECK21-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 20570 // CHECK21-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 20571 // CHECK21-NEXT: [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32* 20572 // CHECK21-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32* 20573 // CHECK21-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 8 20574 // CHECK21-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* 20575 // CHECK21-NEXT: store i16 [[TMP0]], i16* [[CONV3]], align 2 20576 // CHECK21-NEXT: [[TMP1:%.*]] = load i64, i64* [[AA_CASTED]], align 8 20577 
// CHECK21-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV1]], align 8 20578 // CHECK21-NEXT: [[CONV4:%.*]] = bitcast i64* [[LIN_CASTED]] to i32* 20579 // CHECK21-NEXT: store i32 [[TMP2]], i32* [[CONV4]], align 4 20580 // CHECK21-NEXT: [[TMP3:%.*]] = load i64, i64* [[LIN_CASTED]], align 8 20581 // CHECK21-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV2]], align 8 20582 // CHECK21-NEXT: [[CONV5:%.*]] = bitcast i64* [[A_CASTED]] to i32* 20583 // CHECK21-NEXT: store i32 [[TMP4]], i32* [[CONV5]], align 4 20584 // CHECK21-NEXT: [[TMP5:%.*]] = load i64, i64* [[A_CASTED]], align 8 20585 // CHECK21-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]]) 20586 // CHECK21-NEXT: ret void 20587 // 20588 // 20589 // CHECK21-LABEL: define {{[^@]+}}@.omp_outlined..1 20590 // CHECK21-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[AA:%.*]], i64 [[LIN:%.*]], i64 [[A:%.*]]) #[[ATTR1]] { 20591 // CHECK21-NEXT: entry: 20592 // CHECK21-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 20593 // CHECK21-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 20594 // CHECK21-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 20595 // CHECK21-NEXT: [[LIN_ADDR:%.*]] = alloca i64, align 8 20596 // CHECK21-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 20597 // CHECK21-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 20598 // CHECK21-NEXT: [[TMP:%.*]] = alloca i64, align 8 20599 // CHECK21-NEXT: [[DOTLINEAR_START:%.*]] = alloca i32, align 4 20600 // CHECK21-NEXT: [[DOTLINEAR_START3:%.*]] = alloca i32, align 4 20601 // CHECK21-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8 20602 // CHECK21-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 20603 // CHECK21-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 20604 // CHECK21-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 20605 // CHECK21-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 20606 // CHECK21-NEXT: [[IT:%.*]] = alloca i64, align 8 20607 // CHECK21-NEXT: [[LIN4:%.*]] = alloca i32, align 4 20608 // CHECK21-NEXT: [[A5:%.*]] = alloca i32, align 4 20609 // CHECK21-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 20610 // CHECK21-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 20611 // CHECK21-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 20612 // CHECK21-NEXT: store i64 [[LIN]], i64* [[LIN_ADDR]], align 8 20613 // CHECK21-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 20614 // CHECK21-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 20615 // CHECK21-NEXT: [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32* 20616 // CHECK21-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32* 20617 // CHECK21-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV1]], align 8 20618 // CHECK21-NEXT: store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4 20619 // CHECK21-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV2]], align 8 20620 // CHECK21-NEXT: store i32 [[TMP1]], i32* [[DOTLINEAR_START3]], align 4 20621 // CHECK21-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv() #[[ATTR5:[0-9]+]] 20622 // CHECK21-NEXT: store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8 20623 // CHECK21-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 20624 // CHECK21-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8 20625 // CHECK21-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 20626 // CHECK21-NEXT: store i32 0, i32* 
[[DOTOMP_IS_LAST]], align 4 20627 // CHECK21-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 20628 // CHECK21-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 20629 // CHECK21-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP3]]) 20630 // CHECK21-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) 20631 // CHECK21-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 20632 // CHECK21-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3 20633 // CHECK21-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 20634 // CHECK21: cond.true: 20635 // CHECK21-NEXT: br label [[COND_END:%.*]] 20636 // CHECK21: cond.false: 20637 // CHECK21-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 20638 // CHECK21-NEXT: br label [[COND_END]] 20639 // CHECK21: cond.end: 20640 // CHECK21-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 20641 // CHECK21-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 20642 // CHECK21-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 20643 // CHECK21-NEXT: store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8 20644 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 20645 // CHECK21: omp.inner.for.cond: 20646 // CHECK21-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !17 20647 // CHECK21-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !17 20648 // CHECK21-NEXT: [[CMP6:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]] 20649 // CHECK21-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 20650 // CHECK21: omp.inner.for.body: 20651 // CHECK21-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !17 20652 // CHECK21-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400 20653 // CHECK21-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] 20654 // CHECK21-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !17 20655 // CHECK21-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4, !llvm.access.group !17 20656 // CHECK21-NEXT: [[CONV7:%.*]] = sext i32 [[TMP10]] to i64 20657 // CHECK21-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !17 20658 // CHECK21-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !17 20659 // CHECK21-NEXT: [[MUL8:%.*]] = mul i64 [[TMP11]], [[TMP12]] 20660 // CHECK21-NEXT: [[ADD:%.*]] = add i64 [[CONV7]], [[MUL8]] 20661 // CHECK21-NEXT: [[CONV9:%.*]] = trunc i64 [[ADD]] to i32 20662 // CHECK21-NEXT: store i32 [[CONV9]], i32* [[LIN4]], align 4, !llvm.access.group !17 20663 // CHECK21-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START3]], align 4, !llvm.access.group !17 20664 // CHECK21-NEXT: [[CONV10:%.*]] = sext i32 [[TMP13]] to i64 20665 // CHECK21-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !17 20666 // CHECK21-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !17 20667 // CHECK21-NEXT: [[MUL11:%.*]] = mul i64 [[TMP14]], [[TMP15]] 20668 // CHECK21-NEXT: [[ADD12:%.*]] = add i64 [[CONV10]], [[MUL11]] 20669 // CHECK21-NEXT: [[CONV13:%.*]] = trunc i64 [[ADD12]] to i32 20670 // CHECK21-NEXT: store i32 [[CONV13]], i32* [[A5]], align 4, !llvm.access.group !17 20671 // CHECK21-NEXT: [[TMP16:%.*]] = load i16, i16* [[CONV]], align 8, !llvm.access.group !17 
20672 // CHECK21-NEXT: [[CONV14:%.*]] = sext i16 [[TMP16]] to i32 20673 // CHECK21-NEXT: [[ADD15:%.*]] = add nsw i32 [[CONV14]], 1 20674 // CHECK21-NEXT: [[CONV16:%.*]] = trunc i32 [[ADD15]] to i16 20675 // CHECK21-NEXT: store i16 [[CONV16]], i16* [[CONV]], align 8, !llvm.access.group !17 20676 // CHECK21-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 20677 // CHECK21: omp.body.continue: 20678 // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 20679 // CHECK21: omp.inner.for.inc: 20680 // CHECK21-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !17 20681 // CHECK21-NEXT: [[ADD17:%.*]] = add i64 [[TMP17]], 1 20682 // CHECK21-NEXT: store i64 [[ADD17]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !17 20683 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP18:![0-9]+]] 20684 // CHECK21: omp.inner.for.end: 20685 // CHECK21-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 20686 // CHECK21: omp.loop.exit: 20687 // CHECK21-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 20688 // CHECK21-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 20689 // CHECK21-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0 20690 // CHECK21-NEXT: br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 20691 // CHECK21: .omp.final.then: 20692 // CHECK21-NEXT: store i64 400, i64* [[IT]], align 8 20693 // CHECK21-NEXT: br label [[DOTOMP_FINAL_DONE]] 20694 // CHECK21: .omp.final.done: 20695 // CHECK21-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 20696 // CHECK21-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0 20697 // CHECK21-NEXT: br i1 [[TMP21]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]] 20698 // CHECK21: .omp.linear.pu: 20699 // CHECK21-NEXT: [[TMP22:%.*]] = load i32, i32* [[LIN4]], align 4 20700 // CHECK21-NEXT: store i32 [[TMP22]], i32* [[CONV1]], align 8 20701 // CHECK21-NEXT: [[TMP23:%.*]] = load i32, i32* [[A5]], align 4 20702 // CHECK21-NEXT: store i32 [[TMP23]], i32* [[CONV2]], align 8 20703 // CHECK21-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] 20704 // CHECK21: .omp.linear.pu.done: 20705 // CHECK21-NEXT: ret void 20706 // 20707 // 20708 // CHECK21-LABEL: define {{[^@]+}}@_Z7get_valv 20709 // CHECK21-SAME: () #[[ATTR3:[0-9]+]] { 20710 // CHECK21-NEXT: entry: 20711 // CHECK21-NEXT: ret i64 0 20712 // 20713 // 20714 // CHECK21-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116 20715 // CHECK21-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]]) #[[ATTR0]] { 20716 // CHECK21-NEXT: entry: 20717 // CHECK21-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 20718 // CHECK21-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 20719 // CHECK21-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 20720 // CHECK21-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 20721 // CHECK21-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 20722 // CHECK21-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 20723 // CHECK21-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 20724 // CHECK21-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 20725 // CHECK21-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 8 20726 // CHECK21-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32* 20727 // CHECK21-NEXT: store i32 [[TMP0]], i32* [[CONV2]], align 4 20728 // CHECK21-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8 20729 // CHECK21-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 8 20730 // CHECK21-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* 20731 // CHECK21-NEXT: store i16 
[[TMP2]], i16* [[CONV3]], align 2 20732 // CHECK21-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8 20733 // CHECK21-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]]) 20734 // CHECK21-NEXT: ret void 20735 // 20736 // 20737 // CHECK21-LABEL: define {{[^@]+}}@.omp_outlined..2 20738 // CHECK21-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]]) #[[ATTR1]] { 20739 // CHECK21-NEXT: entry: 20740 // CHECK21-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 20741 // CHECK21-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 20742 // CHECK21-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 20743 // CHECK21-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 20744 // CHECK21-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 20745 // CHECK21-NEXT: [[TMP:%.*]] = alloca i16, align 2 20746 // CHECK21-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 20747 // CHECK21-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 20748 // CHECK21-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 20749 // CHECK21-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 20750 // CHECK21-NEXT: [[IT:%.*]] = alloca i16, align 2 20751 // CHECK21-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 20752 // CHECK21-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 20753 // CHECK21-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 20754 // CHECK21-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 20755 // CHECK21-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 20756 // CHECK21-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 20757 // CHECK21-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 20758 // CHECK21-NEXT: store i32 3, i32* [[DOTOMP_UB]], align 4 20759 // CHECK21-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 20760 // CHECK21-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 20761 // CHECK21-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 20762 // CHECK21-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 20763 // CHECK21-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 20764 // CHECK21-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 20765 // CHECK21-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3 20766 // CHECK21-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 20767 // CHECK21: cond.true: 20768 // CHECK21-NEXT: br label [[COND_END:%.*]] 20769 // CHECK21: cond.false: 20770 // CHECK21-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 20771 // CHECK21-NEXT: br label [[COND_END]] 20772 // CHECK21: cond.end: 20773 // CHECK21-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 20774 // CHECK21-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 20775 // CHECK21-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 20776 // CHECK21-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 20777 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 20778 // CHECK21: omp.inner.for.cond: 20779 // CHECK21-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20 20780 // CHECK21-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, 
!llvm.access.group !20 20781 // CHECK21-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 20782 // CHECK21-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 20783 // CHECK21: omp.inner.for.body: 20784 // CHECK21-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20 20785 // CHECK21-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4 20786 // CHECK21-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]] 20787 // CHECK21-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD]] to i16 20788 // CHECK21-NEXT: store i16 [[CONV3]], i16* [[IT]], align 2, !llvm.access.group !20 20789 // CHECK21-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !20 20790 // CHECK21-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], 1 20791 // CHECK21-NEXT: store i32 [[ADD4]], i32* [[CONV]], align 8, !llvm.access.group !20 20792 // CHECK21-NEXT: [[TMP9:%.*]] = load i16, i16* [[CONV1]], align 8, !llvm.access.group !20 20793 // CHECK21-NEXT: [[CONV5:%.*]] = sext i16 [[TMP9]] to i32 20794 // CHECK21-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1 20795 // CHECK21-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16 20796 // CHECK21-NEXT: store i16 [[CONV7]], i16* [[CONV1]], align 8, !llvm.access.group !20 20797 // CHECK21-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 20798 // CHECK21: omp.body.continue: 20799 // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 20800 // CHECK21: omp.inner.for.inc: 20801 // CHECK21-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20 20802 // CHECK21-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP10]], 1 20803 // CHECK21-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20 20804 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]] 20805 // CHECK21: omp.inner.for.end: 20806 // CHECK21-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 20807 // CHECK21: omp.loop.exit: 20808 // CHECK21-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 20809 // CHECK21-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 20810 // CHECK21-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 20811 // CHECK21-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 20812 // CHECK21: .omp.final.then: 20813 // CHECK21-NEXT: store i16 22, i16* [[IT]], align 2 20814 // CHECK21-NEXT: br label [[DOTOMP_FINAL_DONE]] 20815 // CHECK21: .omp.final.done: 20816 // CHECK21-NEXT: ret void 20817 // 20818 // 20819 // CHECK21-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140 20820 // CHECK21-SAME: (i64 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] { 20821 // CHECK21-NEXT: entry: 20822 // CHECK21-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 20823 // CHECK21-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8 20824 // CHECK21-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 20825 // CHECK21-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8 20826 // CHECK21-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8 20827 // CHECK21-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8 20828 // CHECK21-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8 20829 // CHECK21-NEXT: [[CN_ADDR:%.*]] = alloca double*, 
align 8 20830 // CHECK21-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8 20831 // CHECK21-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 20832 // CHECK21-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 20833 // CHECK21-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 20834 // CHECK21-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 20835 // CHECK21-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8 20836 // CHECK21-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 20837 // CHECK21-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8 20838 // CHECK21-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8 20839 // CHECK21-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8 20840 // CHECK21-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8 20841 // CHECK21-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8 20842 // CHECK21-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8 20843 // CHECK21-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 20844 // CHECK21-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 20845 // CHECK21-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8 20846 // CHECK21-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 20847 // CHECK21-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8 20848 // CHECK21-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8 20849 // CHECK21-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8 20850 // CHECK21-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8 20851 // CHECK21-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8 20852 // CHECK21-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8 20853 // CHECK21-NEXT: [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32* 20854 // CHECK21-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 8 20855 // CHECK21-NEXT: [[CONV6:%.*]] = bitcast i64* [[A_CASTED]] to i32* 20856 // CHECK21-NEXT: store i32 [[TMP8]], i32* [[CONV6]], align 4 20857 // CHECK21-NEXT: [[TMP9:%.*]] = load i64, i64* [[A_CASTED]], align 8 20858 // CHECK21-NEXT: [[TMP10:%.*]] = load i32, i32* [[CONV5]], align 8 20859 // CHECK21-NEXT: [[CONV7:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* 20860 // CHECK21-NEXT: store i32 [[TMP10]], i32* [[CONV7]], align 4 20861 // CHECK21-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 20862 // CHECK21-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, [10 x float]*, i64, float*, [5 x [10 x double]]*, i64, i64, double*, %struct.TT*, i64)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP9]], [10 x float]* [[TMP0]], i64 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i64 [[TMP11]]) 20863 // CHECK21-NEXT: ret void 20864 // 20865 // 20866 // CHECK21-LABEL: define {{[^@]+}}@.omp_outlined..3 20867 // CHECK21-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] { 20868 // CHECK21-NEXT: entry: 20869 // CHECK21-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 20870 // CHECK21-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 20871 // CHECK21-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 20872 // CHECK21-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8 20873 // CHECK21-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 20874 // CHECK21-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8 20875 // CHECK21-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8 20876 // CHECK21-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8 20877 // CHECK21-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8 20878 // CHECK21-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8 20879 // CHECK21-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8 20880 // CHECK21-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 20881 // CHECK21-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 20882 // CHECK21-NEXT: [[TMP:%.*]] = alloca i8, align 1 20883 // CHECK21-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 20884 // CHECK21-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 20885 // CHECK21-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 20886 // CHECK21-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 20887 // CHECK21-NEXT: [[IT:%.*]] = alloca i8, align 1 20888 // CHECK21-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 20889 // CHECK21-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 20890 // CHECK21-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 20891 // CHECK21-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8 20892 // CHECK21-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 20893 // CHECK21-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8 20894 // CHECK21-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8 20895 // CHECK21-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8 20896 // CHECK21-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8 20897 // CHECK21-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8 20898 // CHECK21-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8 20899 // CHECK21-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 20900 // CHECK21-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 20901 // CHECK21-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8 20902 // CHECK21-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 20903 // CHECK21-NEXT: 
[[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8 20904 // CHECK21-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8 20905 // CHECK21-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8 20906 // CHECK21-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8 20907 // CHECK21-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8 20908 // CHECK21-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8 20909 // CHECK21-NEXT: [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32* 20910 // CHECK21-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 20911 // CHECK21-NEXT: store i32 25, i32* [[DOTOMP_UB]], align 4 20912 // CHECK21-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 20913 // CHECK21-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 20914 // CHECK21-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV5]], align 8 20915 // CHECK21-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 20916 // CHECK21-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4 20917 // CHECK21-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]]) 20918 // CHECK21-NEXT: br label [[OMP_DISPATCH_COND:%.*]] 20919 // CHECK21: omp.dispatch.cond: 20920 // CHECK21-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 20921 // CHECK21-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25 20922 // CHECK21-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 20923 // CHECK21: cond.true: 20924 // CHECK21-NEXT: br label [[COND_END:%.*]] 20925 // CHECK21: cond.false: 20926 // CHECK21-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 20927 // CHECK21-NEXT: br label [[COND_END]] 20928 // CHECK21: cond.end: 20929 // CHECK21-NEXT: [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] 20930 // CHECK21-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 20931 // CHECK21-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 20932 // CHECK21-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4 20933 // CHECK21-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 20934 // CHECK21-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 20935 // CHECK21-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] 20936 // CHECK21-NEXT: br i1 [[CMP6]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] 20937 // CHECK21: omp.dispatch.body: 20938 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 20939 // CHECK21: omp.inner.for.cond: 20940 // CHECK21-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23 20941 // CHECK21-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !23 20942 // CHECK21-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]] 20943 // CHECK21-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 20944 // CHECK21: omp.inner.for.body: 20945 // CHECK21-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23 20946 // CHECK21-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1 20947 // CHECK21-NEXT: [[SUB:%.*]] = sub nsw i32 122, [[MUL]] 20948 // CHECK21-NEXT: [[CONV8:%.*]] = trunc i32 [[SUB]] to i8 20949 // CHECK21-NEXT: store i8 [[CONV8]], i8* [[IT]], align 1, !llvm.access.group !23 20950 // CHECK21-NEXT: [[TMP19:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !23 20951 // 
CHECK21-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1 20952 // CHECK21-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8, !llvm.access.group !23 20953 // CHECK21-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 2 20954 // CHECK21-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !23 20955 // CHECK21-NEXT: [[CONV9:%.*]] = fpext float [[TMP20]] to double 20956 // CHECK21-NEXT: [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00 20957 // CHECK21-NEXT: [[CONV11:%.*]] = fptrunc double [[ADD10]] to float 20958 // CHECK21-NEXT: store float [[CONV11]], float* [[ARRAYIDX]], align 4, !llvm.access.group !23 20959 // CHECK21-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds float, float* [[TMP2]], i64 3 20960 // CHECK21-NEXT: [[TMP21:%.*]] = load float, float* [[ARRAYIDX12]], align 4, !llvm.access.group !23 20961 // CHECK21-NEXT: [[CONV13:%.*]] = fpext float [[TMP21]] to double 20962 // CHECK21-NEXT: [[ADD14:%.*]] = fadd double [[CONV13]], 1.000000e+00 20963 // CHECK21-NEXT: [[CONV15:%.*]] = fptrunc double [[ADD14]] to float 20964 // CHECK21-NEXT: store float [[CONV15]], float* [[ARRAYIDX12]], align 4, !llvm.access.group !23 20965 // CHECK21-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i64 0, i64 1 20966 // CHECK21-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX16]], i64 0, i64 2 20967 // CHECK21-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX17]], align 8, !llvm.access.group !23 20968 // CHECK21-NEXT: [[ADD18:%.*]] = fadd double [[TMP22]], 1.000000e+00 20969 // CHECK21-NEXT: store double [[ADD18]], double* [[ARRAYIDX17]], align 8, !llvm.access.group !23 20970 // CHECK21-NEXT: [[TMP23:%.*]] = mul nsw i64 1, [[TMP5]] 20971 // CHECK21-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds double, double* [[TMP6]], i64 [[TMP23]] 20972 // CHECK21-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX19]], i64 3 20973 // CHECK21-NEXT: [[TMP24:%.*]] = load double, double* [[ARRAYIDX20]], align 8, !llvm.access.group !23 20974 // CHECK21-NEXT: [[ADD21:%.*]] = fadd double [[TMP24]], 1.000000e+00 20975 // CHECK21-NEXT: store double [[ADD21]], double* [[ARRAYIDX20]], align 8, !llvm.access.group !23 20976 // CHECK21-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0 20977 // CHECK21-NEXT: [[TMP25:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group !23 20978 // CHECK21-NEXT: [[ADD22:%.*]] = add nsw i64 [[TMP25]], 1 20979 // CHECK21-NEXT: store i64 [[ADD22]], i64* [[X]], align 8, !llvm.access.group !23 20980 // CHECK21-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1 20981 // CHECK21-NEXT: [[TMP26:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group !23 20982 // CHECK21-NEXT: [[CONV23:%.*]] = sext i8 [[TMP26]] to i32 20983 // CHECK21-NEXT: [[ADD24:%.*]] = add nsw i32 [[CONV23]], 1 20984 // CHECK21-NEXT: [[CONV25:%.*]] = trunc i32 [[ADD24]] to i8 20985 // CHECK21-NEXT: store i8 [[CONV25]], i8* [[Y]], align 8, !llvm.access.group !23 20986 // CHECK21-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 20987 // CHECK21: omp.body.continue: 20988 // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 20989 // CHECK21: omp.inner.for.inc: 20990 // CHECK21-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23 20991 // CHECK21-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP27]], 1 20992 // CHECK21-NEXT: store i32 [[ADD26]], 
i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]]
// CHECK21: omp.inner.for.end:
// CHECK21-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK21: omp.dispatch.inc:
// CHECK21-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK21-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK21-NEXT: [[ADD27:%.*]] = add nsw i32 [[TMP28]], [[TMP29]]
// CHECK21-NEXT: store i32 [[ADD27]], i32* [[DOTOMP_LB]], align 4
// CHECK21-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK21-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK21-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP30]], [[TMP31]]
// CHECK21-NEXT: store i32 [[ADD28]], i32* [[DOTOMP_UB]], align 4
// CHECK21-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK21: omp.dispatch.end:
// CHECK21-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]])
// CHECK21-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK21-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0
// CHECK21-NEXT: br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK21: .omp.final.then:
// CHECK21-NEXT: store i8 96, i8* [[IT]], align 1
// CHECK21-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK21: .omp.final.done:
// CHECK21-NEXT: ret void
//
//
// CHECK21-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195
// CHECK21-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK21-NEXT: entry:
// CHECK21-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK21-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK21-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8
// CHECK21-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK21-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK21-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK21-NEXT: [[AAA_CASTED:%.*]] = alloca i64, align 8
// CHECK21-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK21-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK21-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
// CHECK21-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK21-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK21-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK21-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
// CHECK21-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK21-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK21-NEXT: [[CONV3:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK21-NEXT: store i32 [[TMP1]], i32* [[CONV3]], align 4
// CHECK21-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK21-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK21-NEXT: [[CONV4:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK21-NEXT: store i16 [[TMP3]], i16* [[CONV4]], align 2
// CHECK21-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK21-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV2]], align 8
// CHECK21-NEXT: [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
// CHECK21-NEXT: store i8 [[TMP5]], i8* [[CONV5]], align 1
// CHECK21-NEXT: [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
// CHECK21-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]])
// CHECK21-NEXT: ret void
//
//
// CHECK21-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK21-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
// CHECK21-NEXT: entry:
// CHECK21-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK21-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK21-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK21-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK21-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8
// CHECK21-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK21-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK21-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK21-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK21-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK21-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK21-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK21-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
// CHECK21-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK21-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK21-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK21-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
// CHECK21-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK21-NEXT: ret void
//
//
// CHECK21-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214
// CHECK21-SAME: (%struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] {
// CHECK21-NEXT: entry:
// CHECK21-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK21-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK21-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK21-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK21-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 8
// CHECK21-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK21-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8
// CHECK21-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK21-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
// CHECK21-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK21-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
// CHECK21-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK21-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8
// CHECK21-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK21-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK21-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8
// CHECK21-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK21-NEXT: [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK21-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
// CHECK21-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK21-NEXT: [[TMP3:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK21-NEXT: [[TMP4:%.*]] = load i16*, i16** [[C_ADDR]], align 8
// CHECK21-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i8*
// CHECK21-NEXT: [[TMP5:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK21-NEXT: [[CONV4:%.*]] = bitcast i64* [[B_CASTED]] to i32*
// CHECK21-NEXT: store i32 [[TMP5]], i32* [[CONV4]], align 4
// CHECK21-NEXT: [[TMP6:%.*]] = load i64, i64* [[B_CASTED]], align 8
// CHECK21-NEXT: [[TMP7:%.*]] = load i8, i8* [[CONV3]], align 8
// CHECK21-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP7]] to i1
// CHECK21-NEXT: [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i8*
// CHECK21-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8
// CHECK21-NEXT: store i8 [[FROMBOOL]], i8* [[CONV5]], align 1
// CHECK21-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK21-NEXT: [[TMP9:%.*]] = load i8, i8* [[CONV3]], align 8
// CHECK21-NEXT: [[TOBOOL6:%.*]] = trunc i8 [[TMP9]] to i1
// CHECK21-NEXT: br i1 [[TOBOOL6]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK21: omp_if.then:
// CHECK21-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]], i64 [[TMP6]], i64 [[TMP2]], i64 [[TMP3]], i16* [[TMP4]], i64 [[TMP8]])
// CHECK21-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK21: omp_if.else:
// CHECK21-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
// CHECK21-NEXT: store i32 [[TMP0]], i32* [[DOTTHREADID_TEMP_]], align 4
// CHECK21-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4
// CHECK21-NEXT: call void @.omp_outlined..5(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTBOUND_ZERO_ADDR]], %struct.S1* [[TMP1]], i64 [[TMP6]], i64 [[TMP2]], i64 [[TMP3]], i16* [[TMP4]], i64 [[TMP8]]) #[[ATTR2:[0-9]+]]
// CHECK21-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
// CHECK21-NEXT: br label [[OMP_IF_END]]
// CHECK21: omp_if.end:
// CHECK21-NEXT: ret void
//
//
// CHECK21-LABEL: define {{[^@]+}}@.omp_outlined..5
// CHECK21-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
// CHECK21-NEXT: entry:
// CHECK21-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK21-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK21-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK21-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK21-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK21-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK21-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 8
// CHECK21-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK21-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK21-NEXT: [[TMP:%.*]] = alloca i64, align 8
// CHECK21-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK21-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK21-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK21-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK21-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK21-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK21-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK21-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK21-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8
// CHECK21-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK21-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK21-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8
// CHECK21-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK21-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK21-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
// CHECK21-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK21-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK21-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
// CHECK21-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i8*
// CHECK21-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK21-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK21-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK21-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK21-NEXT: [[TMP4:%.*]] = load i8, i8* [[CONV3]], align 8
// CHECK21-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP4]] to i1
// CHECK21-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK21: omp_if.then:
// CHECK21-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK21-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK21-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK21-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK21-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP7]], 3
// CHECK21-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK21: cond.true:
// CHECK21-NEXT: br label [[COND_END:%.*]]
// CHECK21: cond.false:
// CHECK21-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK21-NEXT: br label [[COND_END]]
// CHECK21: cond.end:
// CHECK21-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
// CHECK21-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK21-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK21-NEXT: store i64 [[TMP9]], i64* [[DOTOMP_IV]], align 8
// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK21: omp.inner.for.cond:
// CHECK21-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !26
// CHECK21-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !26
// CHECK21-NEXT: [[CMP4:%.*]] = icmp ule i64 [[TMP10]], [[TMP11]]
// CHECK21-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK21: omp.inner.for.body:
// CHECK21-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !26
// CHECK21-NEXT: [[MUL:%.*]] = mul i64 [[TMP12]], 400
// CHECK21-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK21-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !26
// CHECK21-NEXT: [[TMP13:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !26
// CHECK21-NEXT: [[CONV5:%.*]] = sitofp i32 [[TMP13]] to double
// CHECK21-NEXT: [[ADD:%.*]] = fadd double [[CONV5]], 1.500000e+00
// CHECK21-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK21-NEXT: store double [[ADD]], double* [[A]], align 8, !nontemporal !27, !llvm.access.group !26
// CHECK21-NEXT: [[A6:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK21-NEXT: [[TMP14:%.*]] = load double, double* [[A6]], align 8, !nontemporal !27, !llvm.access.group !26
// CHECK21-NEXT: [[INC:%.*]] = fadd double [[TMP14]], 1.000000e+00
// CHECK21-NEXT: store double [[INC]], double* [[A6]], align 8, !nontemporal !27, !llvm.access.group !26
// CHECK21-NEXT: [[CONV7:%.*]] = fptosi double [[INC]] to i16
// CHECK21-NEXT: [[TMP15:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK21-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP15]]
// CHECK21-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
// CHECK21-NEXT: store i16 [[CONV7]], i16* [[ARRAYIDX8]], align 2, !llvm.access.group !26
// CHECK21-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK21: omp.body.continue:
// CHECK21-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK21: omp.inner.for.inc:
// CHECK21-NEXT: [[TMP16:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !26
// CHECK21-NEXT: [[ADD9:%.*]] = add i64 [[TMP16]], 1
// CHECK21-NEXT: store i64 [[ADD9]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !26
// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]]
// CHECK21: omp.inner.for.end:
// CHECK21-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK21: omp_if.else:
// CHECK21-NEXT: [[TMP17:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK21-NEXT: [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4
// CHECK21-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP18]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK21-NEXT: [[TMP19:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK21-NEXT: [[CMP10:%.*]] = icmp ugt i64 [[TMP19]], 3
// CHECK21-NEXT: br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
// CHECK21: cond.true11:
// CHECK21-NEXT: br label [[COND_END13:%.*]]
// CHECK21: cond.false12:
// CHECK21-NEXT: [[TMP20:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK21-NEXT: br label [[COND_END13]]
// CHECK21: cond.end13:
// CHECK21-NEXT: [[COND14:%.*]] = phi i64 [ 3, [[COND_TRUE11]] ], [ [[TMP20]], [[COND_FALSE12]] ]
// CHECK21-NEXT: store i64 [[COND14]], i64* [[DOTOMP_UB]], align 8
// CHECK21-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK21-NEXT: store i64 [[TMP21]], i64* [[DOTOMP_IV]], align 8
// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND15:%.*]]
// CHECK21: omp.inner.for.cond15:
// CHECK21-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK21-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK21-NEXT: [[CMP16:%.*]] = icmp ule i64 [[TMP22]], [[TMP23]]
// CHECK21-NEXT: br i1 [[CMP16]], label [[OMP_INNER_FOR_BODY17:%.*]], label [[OMP_INNER_FOR_END31:%.*]]
// CHECK21: omp.inner.for.body17:
// CHECK21-NEXT: [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK21-NEXT: [[MUL18:%.*]] = mul i64 [[TMP24]], 400
// CHECK21-NEXT: [[SUB19:%.*]] = sub i64 2000, [[MUL18]]
// CHECK21-NEXT: store i64 [[SUB19]], i64* [[IT]], align 8
// CHECK21-NEXT: [[TMP25:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK21-NEXT: [[CONV20:%.*]] = sitofp i32 [[TMP25]] to double
// CHECK21-NEXT: [[ADD21:%.*]] = fadd double [[CONV20]], 1.500000e+00
// CHECK21-NEXT: [[A22:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK21-NEXT: store double [[ADD21]], double* [[A22]], align 8
// CHECK21-NEXT: [[A23:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK21-NEXT: [[TMP26:%.*]] = load double, double* [[A23]], align 8
// CHECK21-NEXT: [[INC24:%.*]] = fadd double [[TMP26]], 1.000000e+00
// CHECK21-NEXT: store double [[INC24]], double* [[A23]], align 8
// CHECK21-NEXT: [[CONV25:%.*]] = fptosi double [[INC24]] to i16
// CHECK21-NEXT: [[TMP27:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK21-NEXT: [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP27]]
// CHECK21-NEXT: [[ARRAYIDX27:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX26]], i64 1
// CHECK21-NEXT: store i16 [[CONV25]], i16* [[ARRAYIDX27]], align 2
// CHECK21-NEXT: br label [[OMP_BODY_CONTINUE28:%.*]]
// CHECK21: omp.body.continue28:
// CHECK21-NEXT: br label [[OMP_INNER_FOR_INC29:%.*]]
// CHECK21: omp.inner.for.inc29:
// CHECK21-NEXT: [[TMP28:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK21-NEXT: [[ADD30:%.*]] = add i64 [[TMP28]], 1
// CHECK21-NEXT: store i64 [[ADD30]], i64* [[DOTOMP_IV]], align 8
// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND15]], !llvm.loop [[LOOP30:![0-9]+]]
// CHECK21: omp.inner.for.end31:
// CHECK21-NEXT: br label [[OMP_IF_END]]
// CHECK21: omp_if.end:
// CHECK21-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK21: omp.loop.exit:
// CHECK21-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK21-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
// CHECK21-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
// CHECK21-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK21-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
// CHECK21-NEXT: br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK21: .omp.final.then:
// CHECK21-NEXT: store i64 400, i64* [[IT]], align 8
// CHECK21-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK21: .omp.final.done:
// CHECK21-NEXT: ret void
//
//
// CHECK21-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178
// CHECK21-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK21-NEXT: entry:
// CHECK21-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK21-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK21-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK21-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK21-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK21-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK21-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK21-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK21-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK21-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK21-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK21-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK21-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK21-NEXT: store i32 [[TMP1]], i32* [[CONV2]], align 4
// CHECK21-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK21-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK21-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK21-NEXT: store i16 [[TMP3]], i16* [[CONV3]], align 2
// CHECK21-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK21-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]])
// CHECK21-NEXT: ret void
//
//
// CHECK21-LABEL: define {{[^@]+}}@.omp_outlined..6
// CHECK21-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
// CHECK21-NEXT: entry:
// CHECK21-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK21-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK21-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK21-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK21-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK21-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK21-NEXT: [[TMP:%.*]] = alloca i64, align 8
// CHECK21-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK21-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK21-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK21-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK21-NEXT: [[I:%.*]] = alloca i64, align 8
// CHECK21-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK21-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK21-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK21-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK21-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK21-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
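// Note: as in the other outlined bodies, the by-value i64 parameter slots are
// reinterpreted (bitcast) at their original scalar types before the statically
// scheduled loop below.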
// CHECK21-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK21-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK21-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK21-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8
// CHECK21-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK21-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK21-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK21-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK21-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK21-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK21-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6
// CHECK21-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK21: cond.true:
// CHECK21-NEXT: br label [[COND_END:%.*]]
// CHECK21: cond.false:
// CHECK21-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK21-NEXT: br label [[COND_END]]
// CHECK21: cond.end:
// CHECK21-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK21-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK21-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK21-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8
// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK21: omp.inner.for.cond:
// CHECK21-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !32
// CHECK21-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !32
// CHECK21-NEXT: [[CMP2:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]]
// CHECK21-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK21: omp.inner.for.body:
// CHECK21-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !32
// CHECK21-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3
// CHECK21-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
// CHECK21-NEXT: store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !32
// CHECK21-NEXT: [[TMP9:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !32
// CHECK21-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK21-NEXT: store i32 [[ADD3]], i32* [[CONV]], align 8, !llvm.access.group !32
// CHECK21-NEXT: [[TMP10:%.*]] = load i16, i16* [[CONV1]], align 8, !llvm.access.group !32
// CHECK21-NEXT: [[CONV4:%.*]] = sext i16 [[TMP10]] to i32
// CHECK21-NEXT: [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
// CHECK21-NEXT: [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
// CHECK21-NEXT: store i16 [[CONV6]], i16* [[CONV1]], align 8, !llvm.access.group !32
// CHECK21-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
// CHECK21-NEXT: [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !32
// CHECK21-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK21-NEXT: store i32 [[ADD7]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !32
// CHECK21-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK21: omp.body.continue:
// CHECK21-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK21: omp.inner.for.inc:
// CHECK21-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !32
// CHECK21-NEXT: [[ADD8:%.*]] = add nsw i64 [[TMP12]], 1
// CHECK21-NEXT: store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !32
// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]]
// CHECK21: omp.inner.for.end:
// CHECK21-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK21: omp.loop.exit:
// CHECK21-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK21-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK21-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
// CHECK21-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK21: .omp.final.then:
// CHECK21-NEXT: store i64 11, i64* [[I]], align 8
// CHECK21-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK21: .omp.final.done:
// CHECK21-NEXT: ret void
//
//
// CHECK22-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96
// CHECK22-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK22-NEXT: entry:
// CHECK22-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK22-NEXT: ret void
//
//
// CHECK22-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK22-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK22-NEXT: entry:
// CHECK22-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK22-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK22-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK22-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK22-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK22-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK22-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK22-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK22-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK22-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK22-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK22-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK22-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4
// CHECK22-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK22-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK22-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK22-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK22-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK22-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK22-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5
// CHECK22-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK22: cond.true:
// CHECK22-NEXT: br label [[COND_END:%.*]]
// CHECK22: cond.false:
// CHECK22-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK22-NEXT: br label [[COND_END]]
// CHECK22: cond.end:
// CHECK22-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK22-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK22-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK22-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK22-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK22: omp.inner.for.cond:
// CHECK22-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK22-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !11
// CHECK22-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK22-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK22: omp.inner.for.body:
// CHECK22-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK22-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
// CHECK22-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]]
// CHECK22-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !11
// CHECK22-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK22: omp.body.continue:
// CHECK22-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK22: omp.inner.for.inc:
// CHECK22-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK22-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK22-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK22-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]]
// CHECK22: omp.inner.for.end:
// CHECK22-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK22: omp.loop.exit:
// CHECK22-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK22-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK22-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
// CHECK22-NEXT: br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK22: .omp.final.then:
// CHECK22-NEXT: store i32 33, i32* [[I]], align 4
// CHECK22-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK22: .omp.final.done:
// CHECK22-NEXT: ret void
//
//
// CHECK22-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108
// CHECK22-SAME: (i64 [[AA:%.*]], i64 [[LIN:%.*]], i64 [[A:%.*]]) #[[ATTR0]] {
// CHECK22-NEXT: entry:
// CHECK22-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK22-NEXT: [[LIN_ADDR:%.*]] = alloca i64, align 8
// CHECK22-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK22-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK22-NEXT: [[LIN_CASTED:%.*]] = alloca i64, align 8
// CHECK22-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK22-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK22-NEXT: store i64 [[LIN]], i64* [[LIN_ADDR]], align 8
// CHECK22-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK22-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK22-NEXT: [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32*
// CHECK22-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK22-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 8
// CHECK22-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK22-NEXT: store i16 [[TMP0]], i16* [[CONV3]], align 2
// CHECK22-NEXT: [[TMP1:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK22-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV1]], align 8
// CHECK22-NEXT: [[CONV4:%.*]] = bitcast i64* [[LIN_CASTED]] to i32*
// CHECK22-NEXT: store i32 [[TMP2]], i32* [[CONV4]], align 4
// CHECK22-NEXT: [[TMP3:%.*]] = load i64, i64* [[LIN_CASTED]], align 8
// CHECK22-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV2]], align 8
// CHECK22-NEXT: [[CONV5:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK22-NEXT: store i32 [[TMP4]], i32* [[CONV5]], align 4
// CHECK22-NEXT: [[TMP5:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK22-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]])
// CHECK22-NEXT: ret void
//
//
// CHECK22-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK22-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[AA:%.*]], i64 [[LIN:%.*]], i64 [[A:%.*]]) #[[ATTR1]] {
// CHECK22-NEXT: entry:
// CHECK22-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK22-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK22-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK22-NEXT: [[LIN_ADDR:%.*]] = alloca i64, align 8
// CHECK22-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK22-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK22-NEXT: [[TMP:%.*]] = alloca i64, align 8
// CHECK22-NEXT: [[DOTLINEAR_START:%.*]] = alloca i32, align 4
// CHECK22-NEXT: [[DOTLINEAR_START3:%.*]] = alloca i32, align 4
// CHECK22-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
// CHECK22-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK22-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK22-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK22-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK22-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK22-NEXT: [[LIN4:%.*]] = alloca i32, align 4
// CHECK22-NEXT: [[A5:%.*]] = alloca i32, align 4
// CHECK22-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK22-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK22-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK22-NEXT: store i64 [[LIN]], i64* [[LIN_ADDR]], align 8
// CHECK22-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK22-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK22-NEXT: [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32*
// CHECK22-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK22-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV1]], align 8
// CHECK22-NEXT: store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4
// CHECK22-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV2]], align 8
// CHECK22-NEXT: store i32 [[TMP1]], i32* [[DOTLINEAR_START3]], align 4
// CHECK22-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv() #[[ATTR5:[0-9]+]]
// CHECK22-NEXT: store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8
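// Worksharing setup for the linear-clause loop follows: bounds 0..3 are
// initialized, a barrier makes the captured linear start values visible to all
// threads, and the unsigned 64-bit static-for init is issued.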
// CHECK22-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK22-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK22-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK22-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK22-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK22-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK22-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP3]])
// CHECK22-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK22-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK22-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3
// CHECK22-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK22: cond.true:
// CHECK22-NEXT: br label [[COND_END:%.*]]
// CHECK22: cond.false:
// CHECK22-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK22-NEXT: br label [[COND_END]]
// CHECK22: cond.end:
// CHECK22-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK22-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK22-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK22-NEXT: store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8
// CHECK22-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK22: omp.inner.for.cond:
// CHECK22-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !17
// CHECK22-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !17
// CHECK22-NEXT: [[CMP6:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]]
// CHECK22-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK22: omp.inner.for.body:
// CHECK22-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !17
// CHECK22-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400
// CHECK22-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK22-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !17
// CHECK22-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4, !llvm.access.group !17
// CHECK22-NEXT: [[CONV7:%.*]] = sext i32 [[TMP10]] to i64
// CHECK22-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !17
// CHECK22-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !17
// CHECK22-NEXT: [[MUL8:%.*]] = mul i64 [[TMP11]], [[TMP12]]
// CHECK22-NEXT: [[ADD:%.*]] = add i64 [[CONV7]], [[MUL8]]
// CHECK22-NEXT: [[CONV9:%.*]] = trunc i64 [[ADD]] to i32
// CHECK22-NEXT: store i32 [[CONV9]], i32* [[LIN4]], align 4, !llvm.access.group !17
// CHECK22-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START3]], align 4, !llvm.access.group !17
// CHECK22-NEXT: [[CONV10:%.*]] = sext i32 [[TMP13]] to i64
// CHECK22-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !17
// CHECK22-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !17
// CHECK22-NEXT: [[MUL11:%.*]] = mul i64 [[TMP14]], [[TMP15]]
// CHECK22-NEXT: [[ADD12:%.*]] = add i64 [[CONV10]], [[MUL11]]
// CHECK22-NEXT: [[CONV13:%.*]] = trunc i64 [[ADD12]] to i32
// CHECK22-NEXT: store i32 [[CONV13]], i32* [[A5]], align 4, !llvm.access.group !17
// CHECK22-NEXT: [[TMP16:%.*]] = load i16, i16* [[CONV]], align 8, !llvm.access.group !17
// CHECK22-NEXT: [[CONV14:%.*]] = sext i16 [[TMP16]] to i32
// CHECK22-NEXT: [[ADD15:%.*]] = add nsw i32 [[CONV14]], 1
// CHECK22-NEXT: [[CONV16:%.*]] = trunc i32 [[ADD15]] to i16
// CHECK22-NEXT: store i16 [[CONV16]], i16* [[CONV]], align 8, !llvm.access.group !17
// CHECK22-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK22: omp.body.continue:
// CHECK22-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK22: omp.inner.for.inc:
// CHECK22-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !17
// CHECK22-NEXT: [[ADD17:%.*]] = add i64 [[TMP17]], 1
// CHECK22-NEXT: store i64 [[ADD17]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !17
// CHECK22-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP18:![0-9]+]]
// CHECK22: omp.inner.for.end:
// CHECK22-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK22: omp.loop.exit:
// CHECK22-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
// CHECK22-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK22-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
// CHECK22-NEXT: br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK22: .omp.final.then:
// CHECK22-NEXT: store i64 400, i64* [[IT]], align 8
// CHECK22-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK22: .omp.final.done:
// CHECK22-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK22-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0
// CHECK22-NEXT: br i1 [[TMP21]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
// CHECK22: .omp.linear.pu:
// CHECK22-NEXT: [[TMP22:%.*]] = load i32, i32* [[LIN4]], align 4
// CHECK22-NEXT: store i32 [[TMP22]], i32* [[CONV1]], align 8
// CHECK22-NEXT: [[TMP23:%.*]] = load i32, i32* [[A5]], align 4
// CHECK22-NEXT: store i32 [[TMP23]], i32* [[CONV2]], align 8
// CHECK22-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]]
// CHECK22: .omp.linear.pu.done:
// CHECK22-NEXT: ret void
//
//
// CHECK22-LABEL: define {{[^@]+}}@_Z7get_valv
// CHECK22-SAME: () #[[ATTR3:[0-9]+]] {
// CHECK22-NEXT: entry:
// CHECK22-NEXT: ret i64 0
//
//
// CHECK22-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116
// CHECK22-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]]) #[[ATTR0]] {
// CHECK22-NEXT: entry:
// CHECK22-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK22-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK22-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK22-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK22-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK22-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK22-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK22-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK22-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK22-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK22-NEXT: store i32 [[TMP0]], i32* [[CONV2]], align 4
// CHECK22-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK22-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK22-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK22-NEXT: store i16 [[TMP2]], i16* [[CONV3]], align 2
// CHECK22-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK22-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]])
// CHECK22-NEXT: ret void
//
//
// CHECK22-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK22-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]]) #[[ATTR1]] {
// CHECK22-NEXT: entry:
// CHECK22-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK22-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK22-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK22-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK22-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK22-NEXT: [[TMP:%.*]] = alloca i16, align 2
// CHECK22-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK22-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK22-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK22-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK22-NEXT: [[IT:%.*]] = alloca i16, align 2
// CHECK22-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK22-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK22-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK22-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK22-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK22-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK22-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK22-NEXT: store i32 3, i32* [[DOTOMP_UB]], align 4
// CHECK22-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK22-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK22-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK22-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK22-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK22-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK22-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3
// CHECK22-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK22: cond.true:
// CHECK22-NEXT: br label [[COND_END:%.*]]
// CHECK22: cond.false:
// CHECK22-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK22-NEXT: br label [[COND_END]]
// CHECK22: cond.end:
// CHECK22-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK22-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK22-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK22-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK22-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
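// The simd loop over the i16 induction variable 'it' follows: each iteration
// stores 6 + 4*iv truncated to i16, then increments 'a' and 'aa' in place.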
21696 // CHECK22: omp.inner.for.cond: 21697 // CHECK22-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20 21698 // CHECK22-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !20 21699 // CHECK22-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 21700 // CHECK22-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 21701 // CHECK22: omp.inner.for.body: 21702 // CHECK22-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20 21703 // CHECK22-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4 21704 // CHECK22-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]] 21705 // CHECK22-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD]] to i16 21706 // CHECK22-NEXT: store i16 [[CONV3]], i16* [[IT]], align 2, !llvm.access.group !20 21707 // CHECK22-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !20 21708 // CHECK22-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], 1 21709 // CHECK22-NEXT: store i32 [[ADD4]], i32* [[CONV]], align 8, !llvm.access.group !20 21710 // CHECK22-NEXT: [[TMP9:%.*]] = load i16, i16* [[CONV1]], align 8, !llvm.access.group !20 21711 // CHECK22-NEXT: [[CONV5:%.*]] = sext i16 [[TMP9]] to i32 21712 // CHECK22-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1 21713 // CHECK22-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16 21714 // CHECK22-NEXT: store i16 [[CONV7]], i16* [[CONV1]], align 8, !llvm.access.group !20 21715 // CHECK22-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 21716 // CHECK22: omp.body.continue: 21717 // CHECK22-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 21718 // CHECK22: omp.inner.for.inc: 21719 // CHECK22-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20 21720 // CHECK22-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP10]], 1 21721 // CHECK22-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20 21722 // CHECK22-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]] 21723 // CHECK22: omp.inner.for.end: 21724 // CHECK22-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 21725 // CHECK22: omp.loop.exit: 21726 // CHECK22-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 21727 // CHECK22-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 21728 // CHECK22-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 21729 // CHECK22-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 21730 // CHECK22: .omp.final.then: 21731 // CHECK22-NEXT: store i16 22, i16* [[IT]], align 2 21732 // CHECK22-NEXT: br label [[DOTOMP_FINAL_DONE]] 21733 // CHECK22: .omp.final.done: 21734 // CHECK22-NEXT: ret void 21735 // 21736 // 21737 // CHECK22-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140 21738 // CHECK22-SAME: (i64 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] { 21739 // CHECK22-NEXT: entry: 21740 // CHECK22-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 21741 // CHECK22-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8 21742 // CHECK22-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 21743 // CHECK22-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8 21744 // CHECK22-NEXT: [[C_ADDR:%.*]] = alloca 
[5 x [10 x double]]*, align 8 21745 // CHECK22-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8 21746 // CHECK22-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8 21747 // CHECK22-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8 21748 // CHECK22-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8 21749 // CHECK22-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 21750 // CHECK22-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 21751 // CHECK22-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 21752 // CHECK22-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 21753 // CHECK22-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8 21754 // CHECK22-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 21755 // CHECK22-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8 21756 // CHECK22-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8 21757 // CHECK22-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8 21758 // CHECK22-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8 21759 // CHECK22-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8 21760 // CHECK22-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8 21761 // CHECK22-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 21762 // CHECK22-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 21763 // CHECK22-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8 21764 // CHECK22-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 21765 // CHECK22-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8 21766 // CHECK22-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8 21767 // CHECK22-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8 21768 // CHECK22-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8 21769 // CHECK22-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8 21770 // CHECK22-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8 21771 // CHECK22-NEXT: [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32* 21772 // CHECK22-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 8 21773 // CHECK22-NEXT: [[CONV6:%.*]] = bitcast i64* [[A_CASTED]] to i32* 21774 // CHECK22-NEXT: store i32 [[TMP8]], i32* [[CONV6]], align 4 21775 // CHECK22-NEXT: [[TMP9:%.*]] = load i64, i64* [[A_CASTED]], align 8 21776 // CHECK22-NEXT: [[TMP10:%.*]] = load i32, i32* [[CONV5]], align 8 21777 // CHECK22-NEXT: [[CONV7:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* 21778 // CHECK22-NEXT: store i32 [[TMP10]], i32* [[CONV7]], align 4 21779 // CHECK22-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 21780 // CHECK22-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, [10 x float]*, i64, float*, [5 x [10 x double]]*, i64, i64, double*, %struct.TT*, i64)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP9]], [10 x float]* [[TMP0]], i64 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i64 [[TMP11]])
// CHECK22-NEXT:    ret void
//
//
// CHECK22-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK22-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
// CHECK22-NEXT:  entry:
// CHECK22-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK22-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK22-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
// CHECK22-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 8
// CHECK22-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
// CHECK22-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[VLA_ADDR4:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 8
// CHECK22-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
// CHECK22-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK22-NEXT:    [[TMP:%.*]] = alloca i8, align 1
// CHECK22-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK22-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK22-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK22-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK22-NEXT:    [[IT:%.*]] = alloca i8, align 1
// CHECK22-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK22-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK22-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK22-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
// CHECK22-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK22-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 8
// CHECK22-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK22-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK22-NEXT:    store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
// CHECK22-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 8
// CHECK22-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
// CHECK22-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK22-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK22-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
// CHECK22-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK22-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
// CHECK22-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK22-NEXT:    [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK22-NEXT:    [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
// CHECK22-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
// CHECK22-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
// CHECK22-NEXT:    [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK22-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK22-NEXT:    store i32 25, i32* [[DOTOMP_UB]], align 4
// CHECK22-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK22-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK22-NEXT:    [[TMP8:%.*]] = load i32, i32* [[CONV5]], align 8
// CHECK22-NEXT:    [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK22-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
// CHECK22-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]])
// CHECK22-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
// CHECK22:       omp.dispatch.cond:
// CHECK22-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK22-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25
// CHECK22-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK22:       cond.true:
// CHECK22-NEXT:    br label [[COND_END:%.*]]
// CHECK22:       cond.false:
// CHECK22-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK22-NEXT:    br label [[COND_END]]
// CHECK22:       cond.end:
// CHECK22-NEXT:    [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK22-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK22-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK22-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// CHECK22-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK22-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK22-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
// CHECK22-NEXT:    br i1 [[CMP6]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK22:       omp.dispatch.body:
// CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK22:       omp.inner.for.cond:
// CHECK22-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
// CHECK22-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !23
// CHECK22-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
// CHECK22-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK22:       omp.inner.for.body:
// CHECK22-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
// CHECK22-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
// CHECK22-NEXT:    [[SUB:%.*]] = sub nsw i32 122, [[MUL]]
// CHECK22-NEXT:    [[CONV8:%.*]] = trunc i32 [[SUB]] to i8
// CHECK22-NEXT:    store i8 [[CONV8]], i8* [[IT]], align 1, !llvm.access.group !23
// CHECK22-NEXT:    [[TMP19:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !23
// CHECK22-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK22-NEXT:    store i32 [[ADD]], i32* [[CONV]], align 8, !llvm.access.group !23
// CHECK22-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 2
// CHECK22-NEXT:    [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !23
// CHECK22-NEXT:    [[CONV9:%.*]] = fpext float [[TMP20]] to double
// CHECK22-NEXT:    [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00
// CHECK22-NEXT:    [[CONV11:%.*]] = fptrunc double [[ADD10]] to float
// CHECK22-NEXT:    store float [[CONV11]], float* [[ARRAYIDX]], align 4, !llvm.access.group !23
// CHECK22-NEXT:    [[ARRAYIDX12:%.*]] = getelementptr inbounds float, float* [[TMP2]], i64 3
// CHECK22-NEXT:    [[TMP21:%.*]] = load float, float* [[ARRAYIDX12]], align 4, !llvm.access.group !23
// CHECK22-NEXT:    [[CONV13:%.*]] = fpext float [[TMP21]] to double
// CHECK22-NEXT:    [[ADD14:%.*]] = fadd double [[CONV13]], 1.000000e+00
// CHECK22-NEXT:    [[CONV15:%.*]] = fptrunc double [[ADD14]] to float
// CHECK22-NEXT:    store float [[CONV15]], float* [[ARRAYIDX12]], align 4, !llvm.access.group !23
// CHECK22-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i64 0, i64 1
// CHECK22-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX16]], i64 0, i64 2
// CHECK22-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX17]], align 8, !llvm.access.group !23
// CHECK22-NEXT:    [[ADD18:%.*]] = fadd double [[TMP22]], 1.000000e+00
// CHECK22-NEXT:    store double [[ADD18]], double* [[ARRAYIDX17]], align 8, !llvm.access.group !23
// CHECK22-NEXT:    [[TMP23:%.*]] = mul nsw i64 1, [[TMP5]]
// CHECK22-NEXT:    [[ARRAYIDX19:%.*]] = getelementptr inbounds double, double* [[TMP6]], i64 [[TMP23]]
// CHECK22-NEXT:    [[ARRAYIDX20:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX19]], i64 3
// CHECK22-NEXT:    [[TMP24:%.*]] = load double, double* [[ARRAYIDX20]], align 8, !llvm.access.group !23
// CHECK22-NEXT:    [[ADD21:%.*]] = fadd double [[TMP24]], 1.000000e+00
// CHECK22-NEXT:    store double [[ADD21]], double* [[ARRAYIDX20]], align 8, !llvm.access.group !23
// CHECK22-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
// CHECK22-NEXT:    [[TMP25:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group !23
// CHECK22-NEXT:    [[ADD22:%.*]] = add nsw i64 [[TMP25]], 1
// CHECK22-NEXT:    store i64 [[ADD22]], i64* [[X]], align 8, !llvm.access.group !23
// CHECK22-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
// CHECK22-NEXT:    [[TMP26:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group !23
// CHECK22-NEXT:    [[CONV23:%.*]] = sext i8 [[TMP26]] to i32
// CHECK22-NEXT:    [[ADD24:%.*]] = add nsw i32 [[CONV23]], 1
// CHECK22-NEXT:    [[CONV25:%.*]] = trunc i32 [[ADD24]] to i8
// CHECK22-NEXT:    store i8 [[CONV25]], i8* [[Y]], align 8, !llvm.access.group !23
// CHECK22-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK22:       omp.body.continue:
// CHECK22-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK22:       omp.inner.for.inc:
// CHECK22-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
// CHECK22-NEXT:    [[ADD26:%.*]] = add nsw i32 [[TMP27]], 1
// CHECK22-NEXT:    store i32 [[ADD26]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
// CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]]
// CHECK22:       omp.inner.for.end:
// CHECK22-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
// CHECK22:       omp.dispatch.inc:
// CHECK22-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK22-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK22-NEXT:    [[ADD27:%.*]] = add nsw i32 [[TMP28]], [[TMP29]]
// CHECK22-NEXT:    store i32 [[ADD27]], i32* [[DOTOMP_LB]], align 4
// CHECK22-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK22-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK22-NEXT:    [[ADD28:%.*]] = add nsw i32 [[TMP30]], [[TMP31]]
// CHECK22-NEXT:    store i32 [[ADD28]], i32* [[DOTOMP_UB]], align 4
// CHECK22-NEXT:    br label [[OMP_DISPATCH_COND]]
// CHECK22:       omp.dispatch.end:
// CHECK22-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]])
// CHECK22-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK22-NEXT:    [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0
// CHECK22-NEXT:    br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK22:       .omp.final.then:
// CHECK22-NEXT:    store i8 96, i8* [[IT]], align 1
// CHECK22-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK22:       .omp.final.done:
// CHECK22-NEXT:    ret void
//
//
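// The `i32 33` passed to @__kmpc_for_static_init_4 in .omp_outlined..3 above is
// the runtime schedule kind kmp_sch_static_chunked, and [[TMP8]] is the captured
// chunk size; that is why the simd body sits inside the omp.dispatch.* outer
// loop, which bumps .omp.lb/.omp.ub by the stride after each chunk. A directive
// of roughly this shape lowers to this pattern (sketch only; `ch` stands for
// whatever expression is captured as .capture_expr.):
//
//   #pragma omp target parallel for simd schedule(static, ch)
//   for (unsigned char it = 'z'; it >= 'a'; it += -1) { /* a, b[2], c[1][2], ... */ }
//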
// CHECK22-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195
// CHECK22-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK22-NEXT:  entry:
// CHECK22-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[AAA_ADDR:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK22-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[AAA_CASTED:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK22-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK22-NEXT:    store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
// CHECK22-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK22-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK22-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK22-NEXT:    [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
// CHECK22-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK22-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK22-NEXT:    [[CONV3:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK22-NEXT:    store i32 [[TMP1]], i32* [[CONV3]], align 4
// CHECK22-NEXT:    [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK22-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK22-NEXT:    [[CONV4:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK22-NEXT:    store i16 [[TMP3]], i16* [[CONV4]], align 2
// CHECK22-NEXT:    [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK22-NEXT:    [[TMP5:%.*]] = load i8, i8* [[CONV2]], align 8
// CHECK22-NEXT:    [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
// CHECK22-NEXT:    store i8 [[TMP5]], i8* [[CONV5]], align 1
// CHECK22-NEXT:    [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
// CHECK22-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]])
// CHECK22-NEXT:    ret void
//
//
// CHECK22-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK22-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
// CHECK22-NEXT:  entry:
// CHECK22-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK22-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK22-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[AAA_ADDR:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK22-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK22-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK22-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK22-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK22-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK22-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK22-NEXT:    store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
// CHECK22-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK22-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK22-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK22-NEXT:    [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
// CHECK22-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK22-NEXT:    ret void
//
//
// CHECK22-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214
// CHECK22-SAME: (%struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] {
// CHECK22-NEXT:  entry:
// CHECK22-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK22-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 8
// CHECK22-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[B_CASTED:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
// CHECK22-NEXT:    [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK22-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
// CHECK22-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK22-NEXT:    store i64 [[B]], i64* [[B_ADDR]], align 8
// CHECK22-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK22-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK22-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 8
// CHECK22-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK22-NEXT:    [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK22-NEXT:    [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
// CHECK22-NEXT:    [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK22-NEXT:    [[TMP3:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK22-NEXT:    [[TMP4:%.*]] = load i16*, i16** [[C_ADDR]], align 8
// CHECK22-NEXT:    [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i8*
// CHECK22-NEXT:    [[TMP5:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK22-NEXT:    [[CONV4:%.*]] = bitcast i64* [[B_CASTED]] to i32*
// CHECK22-NEXT:    store i32 [[TMP5]], i32* [[CONV4]], align 4
// CHECK22-NEXT:    [[TMP6:%.*]] = load i64, i64* [[B_CASTED]], align 8
// CHECK22-NEXT:    [[TMP7:%.*]] = load i8, i8* [[CONV3]], align 8
// CHECK22-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP7]] to i1
// CHECK22-NEXT:    [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i8*
// CHECK22-NEXT:    [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8
// CHECK22-NEXT:    store i8 [[FROMBOOL]], i8* [[CONV5]], align 1
// CHECK22-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK22-NEXT:    [[TMP9:%.*]] = load i8, i8* [[CONV3]], align 8
// CHECK22-NEXT:    [[TOBOOL6:%.*]] = trunc i8 [[TMP9]] to i1
// CHECK22-NEXT:    br i1 [[TOBOOL6]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK22:       omp_if.then:
// CHECK22-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]], i64 [[TMP6]], i64 [[TMP2]], i64 [[TMP3]], i16* [[TMP4]], i64 [[TMP8]])
// CHECK22-NEXT:    br label [[OMP_IF_END:%.*]]
// CHECK22:       omp_if.else:
// CHECK22-NEXT:    call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
// CHECK22-NEXT:    store i32 [[TMP0]], i32* [[DOTTHREADID_TEMP_]], align 4
// CHECK22-NEXT:    store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4
// CHECK22-NEXT:    call void @.omp_outlined..5(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTBOUND_ZERO_ADDR]], %struct.S1* [[TMP1]], i64 [[TMP6]], i64 [[TMP2]], i64 [[TMP3]], i16* [[TMP4]], i64 [[TMP8]]) #[[ATTR2:[0-9]+]]
// CHECK22-NEXT:    call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
// CHECK22-NEXT:    br label [[OMP_IF_END]]
// CHECK22:       omp_if.end:
// CHECK22-NEXT:    ret void
//
//
// CHECK22-LABEL: define {{[^@]+}}@.omp_outlined..5
// CHECK22-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
// CHECK22-NEXT:  entry:
// CHECK22-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK22-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK22-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK22-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 8
// CHECK22-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[TMP:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK22-NEXT:    [[IT:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK22-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK22-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK22-NEXT:    store i64 [[B]], i64* [[B_ADDR]], align 8
// CHECK22-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK22-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK22-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 8
// CHECK22-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK22-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK22-NEXT:    [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
// CHECK22-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK22-NEXT:    [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK22-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
// CHECK22-NEXT:    [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i8*
// CHECK22-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK22-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK22-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK22-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK22-NEXT:    [[TMP4:%.*]] = load i8, i8* [[CONV3]], align 8
// CHECK22-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP4]] to i1
// CHECK22-NEXT:    br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK22:       omp_if.then:
// CHECK22-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK22-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK22-NEXT:    call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK22-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK22-NEXT:    [[CMP:%.*]] = icmp ugt i64 [[TMP7]], 3
// CHECK22-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK22:       cond.true:
// CHECK22-NEXT:    br label [[COND_END:%.*]]
// CHECK22:       cond.false:
// CHECK22-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK22-NEXT:    br label [[COND_END]]
// CHECK22:       cond.end:
// CHECK22-NEXT:    [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
// CHECK22-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK22-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK22-NEXT:    store i64 [[TMP9]], i64* [[DOTOMP_IV]], align 8
// CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK22:       omp.inner.for.cond:
// CHECK22-NEXT:    [[TMP10:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !26
// CHECK22-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !26
// CHECK22-NEXT:    [[CMP4:%.*]] = icmp ule i64 [[TMP10]], [[TMP11]]
// CHECK22-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK22:       omp.inner.for.body:
// CHECK22-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !26
// CHECK22-NEXT:    [[MUL:%.*]] = mul i64 [[TMP12]], 400
// CHECK22-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK22-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !26
// CHECK22-NEXT:    [[TMP13:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !26
// CHECK22-NEXT:    [[CONV5:%.*]] = sitofp i32 [[TMP13]] to double
// CHECK22-NEXT:    [[ADD:%.*]] = fadd double [[CONV5]], 1.500000e+00
// CHECK22-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK22-NEXT:    store double [[ADD]], double* [[A]], align 8, !nontemporal !27, !llvm.access.group !26
// CHECK22-NEXT:    [[A6:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK22-NEXT:    [[TMP14:%.*]] = load double, double* [[A6]], align 8, !nontemporal !27, !llvm.access.group !26
// CHECK22-NEXT:    [[INC:%.*]] = fadd double [[TMP14]], 1.000000e+00
// CHECK22-NEXT:    store double [[INC]], double* [[A6]], align 8, !nontemporal !27, !llvm.access.group !26
// CHECK22-NEXT:    [[CONV7:%.*]] = fptosi double [[INC]] to i16
// CHECK22-NEXT:    [[TMP15:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK22-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP15]]
// CHECK22-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
// CHECK22-NEXT:    store i16 [[CONV7]], i16* [[ARRAYIDX8]], align 2, !llvm.access.group !26
// CHECK22-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK22:       omp.body.continue:
// CHECK22-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK22:       omp.inner.for.inc:
// CHECK22-NEXT:    [[TMP16:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !26
// CHECK22-NEXT:    [[ADD9:%.*]] = add i64 [[TMP16]], 1
// CHECK22-NEXT:    store i64 [[ADD9]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !26
// CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]]
// CHECK22:       omp.inner.for.end:
// CHECK22-NEXT:    br label [[OMP_IF_END:%.*]]
// CHECK22:       omp_if.else:
// CHECK22-NEXT:    [[TMP17:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK22-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4
// CHECK22-NEXT:    call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP18]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK22-NEXT:    [[TMP19:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK22-NEXT:    [[CMP10:%.*]] = icmp ugt i64 [[TMP19]], 3
// CHECK22-NEXT:    br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
// CHECK22:       cond.true11:
// CHECK22-NEXT:    br label [[COND_END13:%.*]]
// CHECK22:       cond.false12:
// CHECK22-NEXT:    [[TMP20:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK22-NEXT:    br label [[COND_END13]]
// CHECK22:       cond.end13:
// CHECK22-NEXT:    [[COND14:%.*]] = phi i64 [ 3, [[COND_TRUE11]] ], [ [[TMP20]], [[COND_FALSE12]] ]
// CHECK22-NEXT:    store i64 [[COND14]], i64* [[DOTOMP_UB]], align 8
// CHECK22-NEXT:    [[TMP21:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK22-NEXT:    store i64 [[TMP21]], i64* [[DOTOMP_IV]], align 8
// CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND15:%.*]]
// CHECK22:       omp.inner.for.cond15:
// CHECK22-NEXT:    [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK22-NEXT:    [[TMP23:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK22-NEXT:    [[CMP16:%.*]] = icmp ule i64 [[TMP22]], [[TMP23]]
// CHECK22-NEXT:    br i1 [[CMP16]], label [[OMP_INNER_FOR_BODY17:%.*]], label [[OMP_INNER_FOR_END31:%.*]]
// CHECK22:       omp.inner.for.body17:
// CHECK22-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK22-NEXT:    [[MUL18:%.*]] = mul i64 [[TMP24]], 400
// CHECK22-NEXT:    [[SUB19:%.*]] = sub i64 2000, [[MUL18]]
// CHECK22-NEXT:    store i64 [[SUB19]], i64* [[IT]], align 8
// CHECK22-NEXT:    [[TMP25:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK22-NEXT:    [[CONV20:%.*]] = sitofp i32 [[TMP25]] to double
// CHECK22-NEXT:    [[ADD21:%.*]] = fadd double [[CONV20]], 1.500000e+00
// CHECK22-NEXT:    [[A22:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK22-NEXT:    store double [[ADD21]], double* [[A22]], align 8
// CHECK22-NEXT:    [[A23:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK22-NEXT:    [[TMP26:%.*]] = load double, double* [[A23]], align 8
// CHECK22-NEXT:    [[INC24:%.*]] = fadd double [[TMP26]], 1.000000e+00
// CHECK22-NEXT:    store double [[INC24]], double* [[A23]], align 8
// CHECK22-NEXT:    [[CONV25:%.*]] = fptosi double [[INC24]] to i16
// CHECK22-NEXT:    [[TMP27:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK22-NEXT:    [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP27]]
// CHECK22-NEXT:    [[ARRAYIDX27:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX26]], i64 1
// CHECK22-NEXT:    store i16 [[CONV25]], i16* [[ARRAYIDX27]], align 2
// CHECK22-NEXT:    br label [[OMP_BODY_CONTINUE28:%.*]]
// CHECK22:       omp.body.continue28:
// CHECK22-NEXT:    br label [[OMP_INNER_FOR_INC29:%.*]]
// CHECK22:       omp.inner.for.inc29:
// CHECK22-NEXT:    [[TMP28:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK22-NEXT:    [[ADD30:%.*]] = add i64 [[TMP28]], 1
// CHECK22-NEXT:    store i64 [[ADD30]], i64* [[DOTOMP_IV]], align 8
// CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND15]], !llvm.loop [[LOOP30:![0-9]+]]
// CHECK22:       omp.inner.for.end31:
// CHECK22-NEXT:    br label [[OMP_IF_END]]
// CHECK22:       omp_if.end:
// CHECK22-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK22:       omp.loop.exit:
// CHECK22-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK22-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
// CHECK22-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
// CHECK22-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK22-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
// CHECK22-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK22:       .omp.final.then:
// CHECK22-NEXT:    store i64 400, i64* [[IT]], align 8
// CHECK22-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK22:       .omp.final.done:
// CHECK22-NEXT:    ret void
//
//
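// .omp_outlined..5 above is emitted as two copies of the same loop selected on
// the captured i8 bool: the omp_if.then copy carries !llvm.access.group and
// !nontemporal metadata so it stays vectorizable, while the omp_if.else copy
// has neither. Together with the fork-vs-serialized split in the l214 region
// entry, this matches what one would expect from an OpenMP 5.0 if-clause that
// governs both the parallel and the simd part, roughly (sketch, hypothetical
// condition `cond`):
//
//   #pragma omp target parallel for simd if (cond) nontemporal(a)
//   for (unsigned long long it = 2000; it >= 600; it -= 400) { a += 1.5; /* ... */ }
//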
// CHECK22-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178
// CHECK22-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK22-NEXT:  entry:
// CHECK22-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK22-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK22-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK22-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK22-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK22-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK22-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK22-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK22-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK22-NEXT:    store i32 [[TMP1]], i32* [[CONV2]], align 4
// CHECK22-NEXT:    [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK22-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK22-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK22-NEXT:    store i16 [[TMP3]], i16* [[CONV3]], align 2
// CHECK22-NEXT:    [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK22-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]])
// CHECK22-NEXT:    ret void
//
//
// CHECK22-LABEL: define {{[^@]+}}@.omp_outlined..6
// CHECK22-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
// CHECK22-NEXT:  entry:
// CHECK22-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK22-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK22-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK22-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[TMP:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK22-NEXT:    [[I:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK22-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK22-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK22-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK22-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK22-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK22-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK22-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK22-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK22-NEXT:    store i64 6, i64* [[DOTOMP_UB]], align 8
// CHECK22-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK22-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK22-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK22-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK22-NEXT:    call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK22-NEXT:    [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK22-NEXT:    [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6
// CHECK22-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK22:       cond.true:
// CHECK22-NEXT:    br label [[COND_END:%.*]]
// CHECK22:       cond.false:
// CHECK22-NEXT:    [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK22-NEXT:    br label [[COND_END]]
// CHECK22:       cond.end:
// CHECK22-NEXT:    [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK22-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK22-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK22-NEXT:    store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8
// CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK22:       omp.inner.for.cond:
// CHECK22-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !32
// CHECK22-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !32
// CHECK22-NEXT:    [[CMP2:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]]
// CHECK22-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK22:       omp.inner.for.body:
// CHECK22-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !32
// CHECK22-NEXT:    [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3
// CHECK22-NEXT:    [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
// CHECK22-NEXT:    store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !32
// CHECK22-NEXT:    [[TMP9:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !32
// CHECK22-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK22-NEXT:    store i32 [[ADD3]], i32* [[CONV]], align 8, !llvm.access.group !32
// CHECK22-NEXT:    [[TMP10:%.*]] = load i16, i16* [[CONV1]], align 8, !llvm.access.group !32
// CHECK22-NEXT:    [[CONV4:%.*]] = sext i16 [[TMP10]] to i32
// CHECK22-NEXT:    [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
// CHECK22-NEXT:    [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
// CHECK22-NEXT:    store i16 [[CONV6]], i16* [[CONV1]], align 8, !llvm.access.group !32
// CHECK22-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
// CHECK22-NEXT:    [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !32
// CHECK22-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK22-NEXT:    store i32 [[ADD7]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !32
// CHECK22-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK22:       omp.body.continue:
// CHECK22-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK22:       omp.inner.for.inc:
// CHECK22-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !32
// CHECK22-NEXT:    [[ADD8:%.*]] = add nsw i64 [[TMP12]], 1
// CHECK22-NEXT:    store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !32
// CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]]
// CHECK22:       omp.inner.for.end:
// CHECK22-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK22:       omp.loop.exit:
// CHECK22-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK22-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK22-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
// CHECK22-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK22:       .omp.final.then:
// CHECK22-NEXT:    store i64 11, i64* [[I]], align 8
// CHECK22-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK22:       .omp.final.done:
// CHECK22-NEXT:    ret void
//
//
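// The CHECK23 prefixes below cover a 32-bit configuration: pointers and frame
// slots are align 4 and VLA extents travel as plain i32, so only sub-32-bit
// captures still go through a casted forwarding slot. For a 16-bit capture
// that amounts to roughly this C (illustration only, hypothetical names):
//
//   int aa_casted;              // 32-bit forwarding slot
//   *(short *)&aa_casted = aa;  // the bitcast-to-i16* store in the wrappers
//   /* aa_casted is then passed through @__kmpc_fork_call */
//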
// CHECK23-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96
// CHECK23-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK23-NEXT:  entry:
// CHECK23-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK23-NEXT:    ret void
//
//
// CHECK23-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK23-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK23-NEXT:  entry:
// CHECK23-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK23-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK23-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK23-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK23-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK23-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK23-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK23-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK23-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK23-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK23-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK23-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK23-NEXT:    store i32 5, i32* [[DOTOMP_UB]], align 4
// CHECK23-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK23-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK23-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK23-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK23-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK23-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK23-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5
// CHECK23-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK23:       cond.true:
// CHECK23-NEXT:    br label [[COND_END:%.*]]
// CHECK23:       cond.false:
// CHECK23-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK23-NEXT:    br label [[COND_END]]
// CHECK23:       cond.end:
// CHECK23-NEXT:    [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK23-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK23-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK23-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK23:       omp.inner.for.cond:
// CHECK23-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
// CHECK23-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !12
// CHECK23-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK23-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK23:       omp.inner.for.body:
// CHECK23-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
// CHECK23-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
// CHECK23-NEXT:    [[ADD:%.*]] = add nsw i32 3, [[MUL]]
// CHECK23-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !12
// CHECK23-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK23:       omp.body.continue:
// CHECK23-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK23:       omp.inner.for.inc:
// CHECK23-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
// CHECK23-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK23-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
// CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]]
// CHECK23:       omp.inner.for.end:
// CHECK23-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK23:       omp.loop.exit:
// CHECK23-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK23-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK23-NEXT:    [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
// CHECK23-NEXT:    br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK23:       .omp.final.then:
// CHECK23-NEXT:    store i32 33, i32* [[I]], align 4
// CHECK23-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK23:       .omp.final.done:
// CHECK23-NEXT:    ret void
//
//
// CHECK23-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108
// CHECK23-SAME: (i32 [[AA:%.*]], i32 [[LIN:%.*]], i32 [[A:%.*]]) #[[ATTR0]] {
// CHECK23-NEXT:  entry:
// CHECK23-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK23-NEXT:    [[LIN_ADDR:%.*]] = alloca i32, align 4
// CHECK23-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK23-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK23-NEXT:    [[LIN_CASTED:%.*]] = alloca i32, align 4
// CHECK23-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK23-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK23-NEXT:    store i32 [[LIN]], i32* [[LIN_ADDR]], align 4
// CHECK23-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK23-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK23-NEXT:    [[TMP0:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK23-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK23-NEXT:    store i16 [[TMP0]], i16* [[CONV1]], align 2
// CHECK23-NEXT:    [[TMP1:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK23-NEXT:    [[TMP2:%.*]] = load i32, i32* [[LIN_ADDR]], align 4
// CHECK23-NEXT:    store i32 [[TMP2]], i32* [[LIN_CASTED]], align 4
// CHECK23-NEXT:    [[TMP3:%.*]] = load i32, i32* [[LIN_CASTED]], align 4
// CHECK23-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK23-NEXT:    store i32 [[TMP4]], i32* [[A_CASTED]], align 4
// CHECK23-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK23-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]])
// CHECK23-NEXT:    ret void
//
//
// CHECK23-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK23-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[AA:%.*]], i32 [[LIN:%.*]], i32 [[A:%.*]]) #[[ATTR1]] {
// CHECK23-NEXT:  entry:
// CHECK23-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK23-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK23-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK23-NEXT:    [[LIN_ADDR:%.*]] = alloca i32, align 4
// CHECK23-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK23-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK23-NEXT:    [[TMP:%.*]] = alloca i64, align 4
// CHECK23-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i32, align 4
// CHECK23-NEXT:    [[DOTLINEAR_START1:%.*]] = alloca i32, align 4
// CHECK23-NEXT:    [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
// CHECK23-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK23-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK23-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK23-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK23-NEXT:    [[IT:%.*]] = alloca i64, align 8
// CHECK23-NEXT:    [[LIN2:%.*]] = alloca i32, align 4
// CHECK23-NEXT:    [[A3:%.*]] = alloca i32, align 4
// CHECK23-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK23-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK23-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK23-NEXT:    store i32 [[LIN]], i32* [[LIN_ADDR]], align 4
// CHECK23-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK23-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK23-NEXT:    [[TMP0:%.*]] = load i32, i32* [[LIN_ADDR]], align 4
// CHECK23-NEXT:    store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4
// CHECK23-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK23-NEXT:    store i32 [[TMP1]], i32* [[DOTLINEAR_START1]], align 4
// CHECK23-NEXT:    [[CALL:%.*]] = call i64 @_Z7get_valv() #[[ATTR5:[0-9]+]]
// CHECK23-NEXT:    store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8
// CHECK23-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK23-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK23-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK23-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK23-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK23-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK23-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP3]])
// CHECK23-NEXT:    call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK23-NEXT:    [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK23-NEXT:    [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3
// CHECK23-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK23:       cond.true:
// CHECK23-NEXT:    br label [[COND_END:%.*]]
// CHECK23:       cond.false:
// CHECK23-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK23-NEXT:    br label [[COND_END]]
// CHECK23:       cond.end:
// CHECK23-NEXT:    [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK23-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK23-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK23-NEXT:    store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8
// CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK23:       omp.inner.for.cond:
// CHECK23-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK23-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !18
// CHECK23-NEXT:    [[CMP4:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]]
// CHECK23-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK23:       omp.inner.for.body:
// CHECK23-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK23-NEXT:    [[MUL:%.*]] = mul i64 [[TMP9]], 400
// CHECK23-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK23-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !18
// CHECK23-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4, !llvm.access.group !18
// CHECK23-NEXT:    [[CONV5:%.*]] = sext i32 [[TMP10]] to i64
// CHECK23-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK23-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !18
// CHECK23-NEXT:    [[MUL6:%.*]] = mul i64 [[TMP11]], [[TMP12]]
// CHECK23-NEXT:    [[ADD:%.*]] = add i64 [[CONV5]], [[MUL6]]
// CHECK23-NEXT:    [[CONV7:%.*]] = trunc i64 [[ADD]] to i32
// CHECK23-NEXT:    store i32 [[CONV7]], i32* [[LIN2]], align 4, !llvm.access.group !18
// CHECK23-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START1]], align 4, !llvm.access.group !18
// CHECK23-NEXT:    [[CONV8:%.*]] = sext i32 [[TMP13]] to i64
// CHECK23-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK23-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !18
// CHECK23-NEXT:    [[MUL9:%.*]] = mul i64 [[TMP14]], [[TMP15]]
// CHECK23-NEXT:    [[ADD10:%.*]] = add i64 [[CONV8]], [[MUL9]]
// CHECK23-NEXT:    [[CONV11:%.*]] = trunc i64 [[ADD10]] to i32
// CHECK23-NEXT:    store i32 [[CONV11]], i32* [[A3]], align 4, !llvm.access.group !18
// CHECK23-NEXT:    [[TMP16:%.*]] = load i16, i16* [[CONV]], align 4, !llvm.access.group !18
// CHECK23-NEXT:    [[CONV12:%.*]] = sext i16 [[TMP16]] to i32
// CHECK23-NEXT:    [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1
// CHECK23-NEXT:    [[CONV14:%.*]] = trunc i32 [[ADD13]] to i16
// CHECK23-NEXT:    store i16 [[CONV14]], i16* [[CONV]], align 4, !llvm.access.group !18
// CHECK23-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK23:       omp.body.continue:
// CHECK23-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK23:       omp.inner.for.inc:
// CHECK23-NEXT:    [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK23-NEXT:    [[ADD15:%.*]] = add i64 [[TMP17]], 1
// CHECK23-NEXT:    store i64 [[ADD15]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
// CHECK23:       omp.inner.for.end:
// CHECK23-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK23:       omp.loop.exit:
// CHECK23-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
// CHECK23-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK23-NEXT:    [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
// CHECK23-NEXT:    br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK23:       .omp.final.then:
// CHECK23-NEXT:    store i64 400, i64* [[IT]], align 8
// CHECK23-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK23:       .omp.final.done:
// CHECK23-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK23-NEXT:    [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0
// CHECK23-NEXT:    br i1 [[TMP21]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
// CHECK23:       .omp.linear.pu:
// CHECK23-NEXT:    [[TMP22:%.*]] = load i32, i32* [[LIN2]], align 4
// CHECK23-NEXT:    store i32 [[TMP22]], i32* [[LIN_ADDR]], align 4
// CHECK23-NEXT:    [[TMP23:%.*]] = load i32, i32* [[A3]], align 4
// CHECK23-NEXT:    store i32 [[TMP23]], i32* [[A_ADDR]], align 4
// CHECK23-NEXT:    br label [[DOTOMP_LINEAR_PU_DONE]]
// CHECK23:       .omp.linear.pu.done:
// CHECK23-NEXT:    ret void
//
//
// CHECK23-LABEL: define {{[^@]+}}@_Z7get_valv
// CHECK23-SAME: () #[[ATTR3:[0-9]+]] {
// CHECK23-NEXT:  entry:
// CHECK23-NEXT:    ret i64 0
//
//
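// In .omp_outlined..1 above, the linear-clause lowering is visible end to end:
// the incoming values are snapshotted into .linear.start/.linear.start1, the
// step comes from the call to @_Z7get_valv, each iteration recomputes the
// privates as start + iv * step, and the .omp.linear.pu block publishes the
// final values back to the shared variables. The @__kmpc_barrier before the
// static init appears to be there so no thread enters the loop before all of
// them have captured their start values. In pseudo-C (sketch):
//
//   lin2 = lin_start + iv * step;               // private per iteration
//   a3   = a_start   + iv * step;
//   if (.omp.is_last) { lin = lin2; a = a3; }   // .omp.linear.pu
//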
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]]) 22562 // CHECK23-NEXT: ret void 22563 // 22564 // 22565 // CHECK23-LABEL: define {{[^@]+}}@.omp_outlined..2 22566 // CHECK23-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]]) #[[ATTR1]] { 22567 // CHECK23-NEXT: entry: 22568 // CHECK23-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 22569 // CHECK23-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 22570 // CHECK23-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 22571 // CHECK23-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 22572 // CHECK23-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 22573 // CHECK23-NEXT: [[TMP:%.*]] = alloca i16, align 2 22574 // CHECK23-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 22575 // CHECK23-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 22576 // CHECK23-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 22577 // CHECK23-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 22578 // CHECK23-NEXT: [[IT:%.*]] = alloca i16, align 2 22579 // CHECK23-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 22580 // CHECK23-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 22581 // CHECK23-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 22582 // CHECK23-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 22583 // CHECK23-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* 22584 // CHECK23-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 22585 // CHECK23-NEXT: store i32 3, i32* [[DOTOMP_UB]], align 4 22586 // CHECK23-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 22587 // CHECK23-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 22588 // CHECK23-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 22589 // CHECK23-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 22590 // CHECK23-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 22591 // CHECK23-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 22592 // CHECK23-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3 22593 // CHECK23-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 22594 // CHECK23: cond.true: 22595 // CHECK23-NEXT: br label [[COND_END:%.*]] 22596 // CHECK23: cond.false: 22597 // CHECK23-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 22598 // CHECK23-NEXT: br label [[COND_END]] 22599 // CHECK23: cond.end: 22600 // CHECK23-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 22601 // CHECK23-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 22602 // CHECK23-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 22603 // CHECK23-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 22604 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 22605 // CHECK23: omp.inner.for.cond: 22606 // CHECK23-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21 22607 // CHECK23-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !21 22608 // CHECK23-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 22609 // CHECK23-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 22610 // CHECK23: omp.inner.for.body: 22611 // CHECK23-NEXT: [[TMP7:%.*]] = load i32, 
i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
// CHECK23-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4
// CHECK23-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]]
// CHECK23-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD]] to i16
// CHECK23-NEXT: store i16 [[CONV2]], i16* [[IT]], align 2, !llvm.access.group !21
// CHECK23-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !21
// CHECK23-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK23-NEXT: store i32 [[ADD3]], i32* [[A_ADDR]], align 4, !llvm.access.group !21
// CHECK23-NEXT: [[TMP9:%.*]] = load i16, i16* [[CONV]], align 4, !llvm.access.group !21
// CHECK23-NEXT: [[CONV4:%.*]] = sext i16 [[TMP9]] to i32
// CHECK23-NEXT: [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
// CHECK23-NEXT: [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
// CHECK23-NEXT: store i16 [[CONV6]], i16* [[CONV]], align 4, !llvm.access.group !21
// CHECK23-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK23: omp.body.continue:
// CHECK23-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK23: omp.inner.for.inc:
// CHECK23-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
// CHECK23-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK23-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]]
// CHECK23: omp.inner.for.end:
// CHECK23-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK23: omp.loop.exit:
// CHECK23-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK23-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK23-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
// CHECK23-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK23: .omp.final.then:
// CHECK23-NEXT: store i16 22, i16* [[IT]], align 2
// CHECK23-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK23: .omp.final.done:
// CHECK23-NEXT: ret void
//
//
// CHECK23-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140
// CHECK23-SAME: (i32 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], double* nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 4 dereferenceable(12) [[D:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] {
// CHECK23-NEXT: entry:
// CHECK23-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
// CHECK23-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4
// CHECK23-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
// CHECK23-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4
// CHECK23-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
// CHECK23-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK23-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK23-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
// CHECK23-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK23-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4
// CHECK23-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK23-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK23-NEXT: store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
// CHECK23-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4
// CHECK23-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
// CHECK23-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK23-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
// CHECK23-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK23-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
// CHECK23-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK23-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK23-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
// CHECK23-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
// CHECK23-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
// CHECK23-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK23-NEXT: store i32 [[TMP8]], i32* [[A_CASTED]], align 4
// CHECK23-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK23-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK23-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK23-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK23-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x float]*, i32, float*, [5 x [10 x double]]*, i32, i32, double*, %struct.TT*, i32)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP9]], [10 x float]* [[TMP0]], i32 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i32 [[TMP11]])
// CHECK23-NEXT: ret void
//
//
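// NOTE: In the @__Z3fooi_l140 wrapper above, every scalar capture ('a' and the
// precomputed capture expression) is forwarded by value through an *_CASTED
// slot before @__kmpc_fork_call receives the 10 outlined arguments; the arrays,
// the VLA data pointers, and %struct.TT travel as pointers, with the VLA
// extents passed alongside as separate i32 values.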
// CHECK23-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK23-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], double* nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 4 dereferenceable(12) [[D:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
// CHECK23-NEXT: entry:
// CHECK23-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK23-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK23-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
// CHECK23-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4
// CHECK23-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
// CHECK23-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4
// CHECK23-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
// CHECK23-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[TMP:%.*]] = alloca i8, align 1
// CHECK23-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[IT:%.*]] = alloca i8, align 1
// CHECK23-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK23-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK23-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK23-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
// CHECK23-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK23-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4
// CHECK23-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK23-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK23-NEXT: store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
// CHECK23-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4
// CHECK23-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
// CHECK23-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK23-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
// CHECK23-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK23-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
// CHECK23-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK23-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK23-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
// CHECK23-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
// CHECK23-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
// CHECK23-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK23-NEXT: store i32 25, i32* [[DOTOMP_UB]], align 4
// CHECK23-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK23-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK23-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK23-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK23-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
// CHECK23-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]])
// CHECK23-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK23: omp.dispatch.cond:
// CHECK23-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK23-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25
// CHECK23-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK23: cond.true:
// CHECK23-NEXT: br label [[COND_END:%.*]]
// CHECK23: cond.false:
// CHECK23-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK23-NEXT: br label [[COND_END]]
// CHECK23: cond.end:
// CHECK23-NEXT: [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK23-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK23-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK23-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// CHECK23-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK23-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK23-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
// CHECK23-NEXT: br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK23: omp.dispatch.body:
// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK23: omp.inner.for.cond:
// CHECK23-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
// CHECK23-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !24
// CHECK23-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
// CHECK23-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK23: omp.inner.for.body:
// CHECK23-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
// CHECK23-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
// CHECK23-NEXT: [[SUB:%.*]] = sub nsw i32 122, [[MUL]]
// CHECK23-NEXT: [[CONV:%.*]] = trunc i32 [[SUB]] to i8
// CHECK23-NEXT: store i8 [[CONV]], i8* [[IT]], align 1, !llvm.access.group !24
// CHECK23-NEXT: [[TMP19:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !24
// CHECK23-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK23-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4, !llvm.access.group !24
// CHECK23-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i32 0, i32 2
// CHECK23-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !24
// CHECK23-NEXT: [[CONV7:%.*]] = fpext float [[TMP20]] to double
// CHECK23-NEXT: [[ADD8:%.*]] = fadd double [[CONV7]], 1.000000e+00
// CHECK23-NEXT: [[CONV9:%.*]] = fptrunc double [[ADD8]] to float
// CHECK23-NEXT: store float [[CONV9]], float* [[ARRAYIDX]], align 4, !llvm.access.group !24
// CHECK23-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[TMP2]], i32 3
// CHECK23-NEXT: [[TMP21:%.*]] = load float, float* [[ARRAYIDX10]], align 4, !llvm.access.group !24
// CHECK23-NEXT: [[CONV11:%.*]] = fpext float [[TMP21]] to double
// CHECK23-NEXT: [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00
// CHECK23-NEXT: [[CONV13:%.*]] = fptrunc double [[ADD12]] to float
// CHECK23-NEXT: store float [[CONV13]], float* [[ARRAYIDX10]], align 4, !llvm.access.group !24
// CHECK23-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i32 0, i32 1
// CHECK23-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX14]], i32 0, i32 2
// CHECK23-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX15]], align 8, !llvm.access.group !24
// CHECK23-NEXT: [[ADD16:%.*]] = fadd double [[TMP22]], 1.000000e+00
// CHECK23-NEXT: store double [[ADD16]], double* [[ARRAYIDX15]], align 8, !llvm.access.group !24
// CHECK23-NEXT: [[TMP23:%.*]] = mul nsw i32 1, [[TMP5]]
// CHECK23-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds double, double* [[TMP6]], i32 [[TMP23]]
// CHECK23-NEXT: [[ARRAYIDX18:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX17]], i32 3
// CHECK23-NEXT: [[TMP24:%.*]] = load double, double* [[ARRAYIDX18]], align 8, !llvm.access.group !24
// CHECK23-NEXT: [[ADD19:%.*]] = fadd double [[TMP24]], 1.000000e+00
// CHECK23-NEXT: store double [[ADD19]], double* [[ARRAYIDX18]], align 8, !llvm.access.group !24
// CHECK23-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
// CHECK23-NEXT: [[TMP25:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group !24
// CHECK23-NEXT: [[ADD20:%.*]] = add nsw i64 [[TMP25]], 1
// CHECK23-NEXT: store i64 [[ADD20]], i64* [[X]], align 4, !llvm.access.group !24
// CHECK23-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
// CHECK23-NEXT: [[TMP26:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group !24
// CHECK23-NEXT: [[CONV21:%.*]] = sext i8 [[TMP26]] to i32
// CHECK23-NEXT: [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1
// CHECK23-NEXT: [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8
// CHECK23-NEXT: store i8 [[CONV23]], i8* [[Y]], align 4, !llvm.access.group !24
// CHECK23-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK23: omp.body.continue:
// CHECK23-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK23: omp.inner.for.inc:
// CHECK23-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
// CHECK23-NEXT: [[ADD24:%.*]] = add nsw i32 [[TMP27]], 1
// CHECK23-NEXT: store i32 [[ADD24]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
// CHECK23: omp.inner.for.end:
// CHECK23-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK23: omp.dispatch.inc:
// CHECK23-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK23-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK23-NEXT: [[ADD25:%.*]] = add nsw i32 [[TMP28]], [[TMP29]]
// CHECK23-NEXT: store i32 [[ADD25]], i32* [[DOTOMP_LB]], align 4
// CHECK23-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK23-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK23-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP30]], [[TMP31]]
// CHECK23-NEXT: store i32 [[ADD26]], i32* [[DOTOMP_UB]], align 4
// CHECK23-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK23: omp.dispatch.end:
// CHECK23-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]])
// CHECK23-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK23-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0
// CHECK23-NEXT: br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK23: .omp.final.then:
// CHECK23-NEXT: store i8 96, i8* [[IT]], align 1
// CHECK23-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK23: .omp.final.done:
// CHECK23-NEXT: ret void
//
//
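// NOTE: .omp_outlined..3 exercises the chunked static schedule:
// __kmpc_for_static_init_4 is called with schedule kind 33 (presumably
// kmp_sch_static_chunked) and the chunk size taken from the captured
// expression, so the body sits inside an extra omp.dispatch loop that clamps
// UB to 25, runs one chunk, then advances LB/UB by the stride until the
// thread's chunks are exhausted. The final block stores the past-the-end
// value 122 - 26 = 96 into the i8 induction variable 'it'.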
// CHECK23-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195
// CHECK23-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK23-NEXT: entry:
// CHECK23-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK23-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[AAA_CASTED:%.*]] = alloca i32, align 4
// CHECK23-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK23-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK23-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
// CHECK23-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK23-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK23-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
// CHECK23-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK23-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK23-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4
// CHECK23-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK23-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK23-NEXT: [[CONV2:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK23-NEXT: store i16 [[TMP3]], i16* [[CONV2]], align 2
// CHECK23-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK23-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV1]], align 4
// CHECK23-NEXT: [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8*
// CHECK23-NEXT: store i8 [[TMP5]], i8* [[CONV3]], align 1
// CHECK23-NEXT: [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4
// CHECK23-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]])
// CHECK23-NEXT: ret void
//
//
// CHECK23-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK23-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
// CHECK23-NEXT: entry:
// CHECK23-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK23-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK23-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK23-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK23-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK23-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK23-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK23-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK23-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
// CHECK23-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK23-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK23-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
// CHECK23-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK23-NEXT: ret void
//
//
// CHECK23-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214
// CHECK23-SAME: (%struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] {
// CHECK23-NEXT: entry:
// CHECK23-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK23-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4
// CHECK23-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
// CHECK23-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK23-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4
// CHECK23-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK23-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK23-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4
// CHECK23-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK23-NEXT: [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK23-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK23-NEXT: [[TMP3:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK23-NEXT: [[TMP4:%.*]] = load i16*, i16** [[C_ADDR]], align 4
// CHECK23-NEXT: [[CONV:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__ADDR]] to i8*
// CHECK23-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_ADDR]], align 4
// CHECK23-NEXT: store i32 [[TMP5]], i32* [[B_CASTED]], align 4
// CHECK23-NEXT: [[TMP6:%.*]] = load i32, i32* [[B_CASTED]], align 4
// CHECK23-NEXT: [[TMP7:%.*]] = load i8, i8* [[CONV]], align 4
// CHECK23-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP7]] to i1
// CHECK23-NEXT: [[CONV3:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__CASTED]] to i8*
// CHECK23-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8
// CHECK23-NEXT: store i8 [[FROMBOOL]], i8* [[CONV3]], align 1
// CHECK23-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK23-NEXT: [[TMP9:%.*]] = load i8, i8* [[CONV]], align 4
// CHECK23-NEXT: [[TOBOOL4:%.*]] = trunc i8 [[TMP9]] to i1
// CHECK23-NEXT: br i1 [[TOBOOL4]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK23: omp_if.then:
// CHECK23-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*, i32)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]], i32 [[TMP6]], i32 [[TMP2]], i32 [[TMP3]], i16* [[TMP4]], i32 [[TMP8]])
// CHECK23-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK23: omp_if.else:
// CHECK23-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
// CHECK23-NEXT: store i32 [[TMP0]], i32* [[DOTTHREADID_TEMP_]], align 4
// CHECK23-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4
// CHECK23-NEXT: call void @.omp_outlined..5(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTBOUND_ZERO_ADDR]], %struct.S1* [[TMP1]], i32 [[TMP6]], i32 [[TMP2]], i32 [[TMP3]], i16* [[TMP4]], i32 [[TMP8]]) #[[ATTR2:[0-9]+]]
// CHECK23-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
// CHECK23-NEXT: br label [[OMP_IF_END]]
// CHECK23: omp_if.end:
// CHECK23-NEXT: ret void
//
//
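// NOTE: The @__ZN2S12r1Ei_l214 wrapper shows the parallel if-clause lowering:
// the captured condition lives in the low byte of an i32 slot (trunc i8 to i1,
// zext back to i8 for re-capture). When true, omp_if.then forks the team via
// @__kmpc_fork_call; when false, omp_if.else invokes the very same
// @.omp_outlined..5 inline, bracketed by __kmpc_serialized_parallel /
// __kmpc_end_serialized_parallel and passing a zeroed bound-tid slot.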
// CHECK23-LABEL: define {{[^@]+}}@.omp_outlined..5
// CHECK23-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
// CHECK23-NEXT: entry:
// CHECK23-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK23-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK23-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK23-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4
// CHECK23-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK23-NEXT: [[TMP:%.*]] = alloca i64, align 4
// CHECK23-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK23-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK23-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK23-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK23-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK23-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK23-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK23-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4
// CHECK23-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK23-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK23-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4
// CHECK23-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK23-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK23-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK23-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK23-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
// CHECK23-NEXT: [[CONV:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__ADDR]] to i8*
// CHECK23-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK23-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK23-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK23-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK23-NEXT: [[TMP4:%.*]] = load i8, i8* [[CONV]], align 4
// CHECK23-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP4]] to i1
// CHECK23-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK23: omp_if.then:
// CHECK23-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK23-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK23-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK23-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK23-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP7]], 3
// CHECK23-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK23: cond.true:
// CHECK23-NEXT: br label [[COND_END:%.*]]
// CHECK23: cond.false:
// CHECK23-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK23-NEXT: br label [[COND_END]]
// CHECK23: cond.end:
// CHECK23-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
// CHECK23-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK23-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK23-NEXT: store i64 [[TMP9]], i64* [[DOTOMP_IV]], align 8
// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK23: omp.inner.for.cond:
// CHECK23-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !27
// CHECK23-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !27
// CHECK23-NEXT: [[CMP3:%.*]] = icmp ule i64 [[TMP10]], [[TMP11]]
// CHECK23-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK23: omp.inner.for.body:
// CHECK23-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !27
// CHECK23-NEXT: [[MUL:%.*]] = mul i64 [[TMP12]], 400
// CHECK23-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK23-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !27
// CHECK23-NEXT: [[TMP13:%.*]] = load i32, i32* [[B_ADDR]], align 4, !llvm.access.group !27
// CHECK23-NEXT: [[CONV4:%.*]] = sitofp i32 [[TMP13]] to double
// CHECK23-NEXT: [[ADD:%.*]] = fadd double [[CONV4]], 1.500000e+00
// CHECK23-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK23-NEXT: store double [[ADD]], double* [[A]], align 4, !nontemporal !28, !llvm.access.group !27
// CHECK23-NEXT: [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK23-NEXT: [[TMP14:%.*]] = load double, double* [[A5]], align 4, !nontemporal !28, !llvm.access.group !27
// CHECK23-NEXT: [[INC:%.*]] = fadd double [[TMP14]], 1.000000e+00
// CHECK23-NEXT: store double [[INC]], double* [[A5]], align 4, !nontemporal !28, !llvm.access.group !27
// CHECK23-NEXT: [[CONV6:%.*]] = fptosi double [[INC]] to i16
// CHECK23-NEXT: [[TMP15:%.*]] = mul nsw i32 1, [[TMP2]]
// CHECK23-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP15]]
// CHECK23-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
// CHECK23-NEXT: store i16 [[CONV6]], i16* [[ARRAYIDX7]], align 2, !llvm.access.group !27
// CHECK23-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK23: omp.body.continue:
// CHECK23-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK23: omp.inner.for.inc:
// CHECK23-NEXT: [[TMP16:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !27
// CHECK23-NEXT: [[ADD8:%.*]] = add i64 [[TMP16]], 1
// CHECK23-NEXT: store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !27
// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]]
// CHECK23: omp.inner.for.end:
// CHECK23-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK23: omp_if.else:
// CHECK23-NEXT: [[TMP17:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK23-NEXT: [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4
// CHECK23-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP18]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK23-NEXT: [[TMP19:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK23-NEXT: [[CMP9:%.*]] = icmp ugt i64 [[TMP19]], 3
// CHECK23-NEXT: br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
// CHECK23: cond.true10:
// CHECK23-NEXT: br label [[COND_END12:%.*]]
// CHECK23: cond.false11:
// CHECK23-NEXT: [[TMP20:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK23-NEXT: br label [[COND_END12]]
// CHECK23: cond.end12:
// CHECK23-NEXT: [[COND13:%.*]] = phi i64 [ 3, [[COND_TRUE10]] ], [ [[TMP20]], [[COND_FALSE11]] ]
// CHECK23-NEXT: store i64 [[COND13]], i64* [[DOTOMP_UB]], align 8
// CHECK23-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK23-NEXT: store i64 [[TMP21]], i64* [[DOTOMP_IV]], align 8
// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND14:%.*]]
// CHECK23: omp.inner.for.cond14:
// CHECK23-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK23-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK23-NEXT: [[CMP15:%.*]] = icmp ule i64 [[TMP22]], [[TMP23]]
// CHECK23-NEXT: br i1 [[CMP15]], label [[OMP_INNER_FOR_BODY16:%.*]], label [[OMP_INNER_FOR_END30:%.*]]
// CHECK23: omp.inner.for.body16:
// CHECK23-NEXT: [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK23-NEXT: [[MUL17:%.*]] = mul i64 [[TMP24]], 400
// CHECK23-NEXT: [[SUB18:%.*]] = sub i64 2000, [[MUL17]]
// CHECK23-NEXT: store i64 [[SUB18]], i64* [[IT]], align 8
// CHECK23-NEXT: [[TMP25:%.*]] = load i32, i32* [[B_ADDR]], align 4
// CHECK23-NEXT: [[CONV19:%.*]] = sitofp i32 [[TMP25]] to double
// CHECK23-NEXT: [[ADD20:%.*]] = fadd double [[CONV19]], 1.500000e+00
// CHECK23-NEXT: [[A21:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK23-NEXT: store double [[ADD20]], double* [[A21]], align 4
// CHECK23-NEXT: [[A22:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK23-NEXT: [[TMP26:%.*]] = load double, double* [[A22]], align 4
// CHECK23-NEXT: [[INC23:%.*]] = fadd double [[TMP26]], 1.000000e+00
// CHECK23-NEXT: store double [[INC23]], double* [[A22]], align 4
// CHECK23-NEXT: [[CONV24:%.*]] = fptosi double [[INC23]] to i16
// CHECK23-NEXT: [[TMP27:%.*]] = mul nsw i32 1, [[TMP2]]
// CHECK23-NEXT: [[ARRAYIDX25:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP27]]
// CHECK23-NEXT: [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX25]], i32 1
// CHECK23-NEXT: store i16 [[CONV24]], i16* [[ARRAYIDX26]], align 2
// CHECK23-NEXT: br label [[OMP_BODY_CONTINUE27:%.*]]
// CHECK23: omp.body.continue27:
// CHECK23-NEXT: br label [[OMP_INNER_FOR_INC28:%.*]]
// CHECK23: omp.inner.for.inc28:
// CHECK23-NEXT: [[TMP28:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK23-NEXT: [[ADD29:%.*]] = add i64 [[TMP28]], 1
// CHECK23-NEXT: store i64 [[ADD29]], i64* [[DOTOMP_IV]], align 8
// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND14]], !llvm.loop [[LOOP31:![0-9]+]]
// CHECK23: omp.inner.for.end30:
// CHECK23-NEXT: br label [[OMP_IF_END]]
// CHECK23: omp_if.end:
// CHECK23-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK23: omp.loop.exit:
// CHECK23-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK23-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
// CHECK23-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
// CHECK23-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK23-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
// CHECK23-NEXT: br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK23: .omp.final.then:
// CHECK23-NEXT: store i64 400, i64* [[IT]], align 8
// CHECK23-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK23: .omp.final.done:
// CHECK23-NEXT: ret void
//
//
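// NOTE: .omp_outlined..5 is versioned on the simd branch of the if-clause:
// the omp_if.then copy of the loop carries !llvm.access.group / !llvm.loop
// metadata and !nontemporal hints on the 'a' member accesses so it stays
// vectorizable, while omp_if.else repeats the identical statements with no
// such metadata. Both versions use the unsigned 64-bit initializer
// __kmpc_for_static_init_8u with schedule kind 34 (presumably
// kmp_sch_static), matching the unsigned icmp ugt/ule bound checks.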
// CHECK23-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178
// CHECK23-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK23-NEXT: entry:
// CHECK23-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK23-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK23-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK23-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK23-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK23-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK23-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK23-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK23-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4
// CHECK23-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK23-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK23-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK23-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2
// CHECK23-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK23-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]])
// CHECK23-NEXT: ret void
//
//
// CHECK23-LABEL: define {{[^@]+}}@.omp_outlined..6
// CHECK23-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
// CHECK23-NEXT: entry:
// CHECK23-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK23-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK23-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK23-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK23-NEXT: [[TMP:%.*]] = alloca i64, align 4
// CHECK23-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK23-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK23-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK23-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK23-NEXT: [[I:%.*]] = alloca i64, align 8
// CHECK23-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK23-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK23-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK23-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK23-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK23-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK23-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK23-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK23-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8
// CHECK23-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK23-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK23-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK23-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK23-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK23-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK23-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6
// CHECK23-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK23: cond.true:
// CHECK23-NEXT: br label [[COND_END:%.*]]
// CHECK23: cond.false:
// CHECK23-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK23-NEXT: br label [[COND_END]]
// CHECK23: cond.end:
// CHECK23-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK23-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK23-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK23-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8
// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK23: omp.inner.for.cond:
// CHECK23-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !33
// CHECK23-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !33
// CHECK23-NEXT: [[CMP1:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]]
// CHECK23-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK23: omp.inner.for.body:
// CHECK23-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !33
// CHECK23-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3
// CHECK23-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
// CHECK23-NEXT: store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !33
// CHECK23-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !33
// CHECK23-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK23-NEXT: store i32 [[ADD2]], i32* [[A_ADDR]], align 4, !llvm.access.group !33
// CHECK23-NEXT: [[TMP10:%.*]] = load i16, i16* [[CONV]], align 4, !llvm.access.group !33
// CHECK23-NEXT: [[CONV3:%.*]] = sext i16 [[TMP10]] to i32
// CHECK23-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
// CHECK23-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16
// CHECK23-NEXT: store i16 [[CONV5]], i16* [[CONV]], align 4, !llvm.access.group !33
// CHECK23-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
// CHECK23-NEXT: [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !33
// CHECK23-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK23-NEXT: store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !33
// CHECK23-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK23: omp.body.continue:
// CHECK23-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK23: omp.inner.for.inc:
// CHECK23-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !33
// CHECK23-NEXT: [[ADD7:%.*]] = add nsw i64 [[TMP12]], 1
// CHECK23-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !33
// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]]
// CHECK23: omp.inner.for.end:
// CHECK23-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK23: omp.loop.exit:
// CHECK23-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK23-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK23-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
// CHECK23-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK23: .omp.final.then:
// CHECK23-NEXT: store i64 11, i64* [[I]], align 8
// CHECK23-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK23: .omp.final.done:
// CHECK23-NEXT: ret void
//
//
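// NOTE: .omp_outlined..6 switches to the signed 64-bit entry point
// __kmpc_for_static_init_8 because the i64 induction here is signed
// (i = -10 + 3*iv, compared with icmp sle/sgt); with seven iterations
// (iv = 0..6) the final block stores i = -10 + 3*7 = 11.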
// CHECK24-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96
// CHECK24-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK24-NEXT: entry:
// CHECK24-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK24-NEXT: ret void
//
//
// CHECK24-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK24-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK24-NEXT: entry:
// CHECK24-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK24-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK24-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK24-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK24-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK24-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK24-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK24-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK24-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK24-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK24-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK24-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK24-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4
// CHECK24-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK24-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK24-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK24-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK24-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK24-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK24-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5
// CHECK24-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK24: cond.true:
// CHECK24-NEXT: br label [[COND_END:%.*]]
// CHECK24: cond.false:
// CHECK24-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK24-NEXT: br label [[COND_END]]
// CHECK24: cond.end:
// CHECK24-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK24-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK24-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK24-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK24-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK24: omp.inner.for.cond:
// CHECK24-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
// CHECK24-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !12
// CHECK24-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK24-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK24: omp.inner.for.body:
// CHECK24-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
// CHECK24-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
// CHECK24-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]]
// CHECK24-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !12
// CHECK24-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK24: omp.body.continue:
// CHECK24-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK24: omp.inner.for.inc:
// CHECK24-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
// CHECK24-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK24-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
// CHECK24-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]]
// CHECK24: omp.inner.for.end:
// CHECK24-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK24: omp.loop.exit:
// CHECK24-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK24-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK24-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
// CHECK24-NEXT: br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK24: .omp.final.then:
// CHECK24-NEXT: store i32 33, i32* [[I]], align 4
// CHECK24-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK24: .omp.final.done:
// CHECK24-NEXT: ret void
//
//
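// NOTE: The CHECK24 run (a 32-bit host target, judging by the align 4 pointer
// slots and i32 VLA sizes) repeats the same pattern with the unchunked static
// schedule kind 34; after the six iterations iv = 0..5 the final block stores
// the closed-form value i = 3 + 6*5 = 33.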
// CHECK24-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108
// CHECK24-SAME: (i32 [[AA:%.*]], i32 [[LIN:%.*]], i32 [[A:%.*]]) #[[ATTR0]] {
// CHECK24-NEXT: entry:
// CHECK24-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK24-NEXT: [[LIN_ADDR:%.*]] = alloca i32, align 4
// CHECK24-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK24-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK24-NEXT: [[LIN_CASTED:%.*]] = alloca i32, align 4
// CHECK24-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK24-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK24-NEXT: store i32 [[LIN]], i32* [[LIN_ADDR]], align 4
// CHECK24-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK24-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK24-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK24-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK24-NEXT: store i16 [[TMP0]], i16* [[CONV1]], align 2
// CHECK24-NEXT: [[TMP1:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK24-NEXT: [[TMP2:%.*]] = load i32, i32* [[LIN_ADDR]], align 4
// CHECK24-NEXT: store i32 [[TMP2]], i32* [[LIN_CASTED]], align 4
// CHECK24-NEXT: [[TMP3:%.*]] = load i32, i32* [[LIN_CASTED]], align 4
// CHECK24-NEXT: [[TMP4:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK24-NEXT: store i32 [[TMP4]], i32* [[A_CASTED]], align 4
// CHECK24-NEXT: [[TMP5:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK24-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]])
// CHECK24-NEXT: ret void
//
//
// CHECK24-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK24-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[AA:%.*]], i32 [[LIN:%.*]], i32 [[A:%.*]]) #[[ATTR1]] {
// CHECK24-NEXT: entry:
// CHECK24-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK24-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK24-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK24-NEXT: [[LIN_ADDR:%.*]] = alloca i32, align 4
// CHECK24-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK24-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK24-NEXT: [[TMP:%.*]] = alloca i64, align 4
// CHECK24-NEXT: [[DOTLINEAR_START:%.*]] = alloca i32, align 4
// CHECK24-NEXT: [[DOTLINEAR_START1:%.*]] = alloca i32, align 4
// CHECK24-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
// CHECK24-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK24-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK24-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK24-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK24-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK24-NEXT: [[LIN2:%.*]] = alloca i32, align 4
// CHECK24-NEXT: [[A3:%.*]] = alloca i32, align 4
// CHECK24-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK24-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK24-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK24-NEXT: store i32 [[LIN]], i32* [[LIN_ADDR]], align 4
// CHECK24-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK24-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK24-NEXT: [[TMP0:%.*]] = load i32, i32* [[LIN_ADDR]], align 4
// CHECK24-NEXT: store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4
// CHECK24-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK24-NEXT: store i32 [[TMP1]], i32* [[DOTLINEAR_START1]], align 4
// CHECK24-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv() #[[ATTR5:[0-9]+]]
// CHECK24-NEXT: store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8
// CHECK24-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK24-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK24-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK24-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK24-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK24-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK24-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP3]])
// CHECK24-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK24-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK24-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3
// CHECK24-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK24: cond.true:
// CHECK24-NEXT: br label [[COND_END:%.*]]
// CHECK24: cond.false:
// CHECK24-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK24-NEXT: br label [[COND_END]]
// CHECK24: cond.end:
// CHECK24-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK24-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK24-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK24-NEXT: store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8
// CHECK24-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK24: omp.inner.for.cond:
// CHECK24-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK24-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !18
// CHECK24-NEXT: [[CMP4:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]]
// CHECK24-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK24: omp.inner.for.body:
// CHECK24-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK24-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400
// CHECK24-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK24-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !18
// CHECK24-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4, !llvm.access.group !18
// CHECK24-NEXT: [[CONV5:%.*]] = sext i32 [[TMP10]] to i64
// CHECK24-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK24-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !18
// CHECK24-NEXT: [[MUL6:%.*]] = mul i64 [[TMP11]], [[TMP12]]
// CHECK24-NEXT: [[ADD:%.*]] = add i64 [[CONV5]], [[MUL6]]
// CHECK24-NEXT: [[CONV7:%.*]] = trunc i64 [[ADD]] to i32
// CHECK24-NEXT: store i32 [[CONV7]], i32* [[LIN2]], align 4, !llvm.access.group !18
// CHECK24-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START1]], align 4, !llvm.access.group !18
// CHECK24-NEXT: [[CONV8:%.*]] = sext i32 [[TMP13]] to i64
// CHECK24-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK24-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !18
// CHECK24-NEXT: [[MUL9:%.*]] = mul i64 [[TMP14]], [[TMP15]]
// CHECK24-NEXT: [[ADD10:%.*]] = add i64 [[CONV8]], [[MUL9]]
// CHECK24-NEXT: [[CONV11:%.*]] = trunc i64 [[ADD10]] to i32
// CHECK24-NEXT: store i32 [[CONV11]], i32* [[A3]], align 4, !llvm.access.group !18
// CHECK24-NEXT: [[TMP16:%.*]] = load i16, i16* [[CONV]], align 4, !llvm.access.group !18
// CHECK24-NEXT: [[CONV12:%.*]] = sext i16 [[TMP16]] to i32
// CHECK24-NEXT: [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1
// CHECK24-NEXT: [[CONV14:%.*]] = trunc i32 [[ADD13]] to i16
// CHECK24-NEXT: store i16 [[CONV14]], i16* [[CONV]], align 4, !llvm.access.group !18
// CHECK24-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK24: omp.body.continue:
// CHECK24-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK24: omp.inner.for.inc:
// CHECK24-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK24-NEXT: [[ADD15:%.*]] = add i64 [[TMP17]], 1
// CHECK24-NEXT: store i64 [[ADD15]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK24-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
// CHECK24: omp.inner.for.end:
// CHECK24-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK24: omp.loop.exit:
// CHECK24-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
// CHECK24-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK24-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
// CHECK24-NEXT: br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK24: .omp.final.then:
// CHECK24-NEXT: store i64 400, i64* [[IT]], align 8
// CHECK24-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK24: .omp.final.done:
// CHECK24-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK24-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0
// CHECK24-NEXT: br i1 [[TMP21]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
// CHECK24: .omp.linear.pu:
// CHECK24-NEXT: [[TMP22:%.*]] = load i32, i32* [[LIN2]], align 4
// CHECK24-NEXT: store i32 [[TMP22]], i32* [[LIN_ADDR]], align 4
// CHECK24-NEXT: [[TMP23:%.*]] = load i32, i32* [[A3]], align 4
// CHECK24-NEXT: store i32 [[TMP23]], i32* [[A_ADDR]], align 4
// CHECK24-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]]
// CHECK24: .omp.linear.pu.done:
// CHECK24-NEXT: ret void
//
//
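// NOTE: .omp_outlined..1 shows the linear-clause lowering: 'lin' and 'a' are
// snapshotted into .linear.start slots, the step comes from a call to
// @_Z7get_valv, and each iteration recomputes start + iv * step into the
// privates LIN2/A3. A __kmpc_barrier on @[[GLOB3]] precedes the static init
// (presumably so every thread observes the captured starts), and on the last
// iteration the .omp.linear.pu block publishes the final private values back
// to LIN_ADDR/A_ADDR.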
// CHECK24-LABEL: define {{[^@]+}}@_Z7get_valv
// CHECK24-SAME: () #[[ATTR3:[0-9]+]] {
// CHECK24-NEXT: entry:
// CHECK24-NEXT: ret i64 0
//
//
// CHECK24-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116
// CHECK24-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]]) #[[ATTR0]] {
// CHECK24-NEXT: entry:
// CHECK24-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK24-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK24-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK24-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK24-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK24-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK24-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK24-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK24-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4
// CHECK24-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK24-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK24-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK24-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2
// CHECK24-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK24-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]])
// CHECK24-NEXT: ret void
//
//
// CHECK24-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK24-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]]) #[[ATTR1]] {
// CHECK24-NEXT: entry:
// CHECK24-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK24-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK24-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK24-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK24-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK24-NEXT: [[TMP:%.*]] = alloca i16, align 2
// CHECK24-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK24-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK24-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK24-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK24-NEXT: [[IT:%.*]] = alloca i16, align 2
// CHECK24-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK24-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK24-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK24-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK24-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK24-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK24-NEXT: store i32 3, i32* [[DOTOMP_UB]], align 4
// CHECK24-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK24-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK24-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK24-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK24-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK24-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK24-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3
// CHECK24-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK24: cond.true:
// CHECK24-NEXT: br label [[COND_END:%.*]]
// CHECK24: cond.false:
// CHECK24-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK24-NEXT: br label [[COND_END]]
// CHECK24: cond.end:
// CHECK24-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK24-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK24-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK24-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK24-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK24: omp.inner.for.cond:
// CHECK24-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
// CHECK24-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !21
// CHECK24-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK24-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK24: omp.inner.for.body:
// CHECK24-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
// CHECK24-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4
// CHECK24-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]]
// CHECK24-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD]] to i16
// CHECK24-NEXT: store i16 [[CONV2]], i16* [[IT]], align 2, !llvm.access.group !21
// CHECK24-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !21
// CHECK24-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK24-NEXT: store i32 [[ADD3]], i32* [[A_ADDR]], align 4, !llvm.access.group !21
// CHECK24-NEXT: [[TMP9:%.*]] = load i16, i16* [[CONV]], align 4, !llvm.access.group !21
// CHECK24-NEXT: [[CONV4:%.*]] = sext i16 [[TMP9]] to i32
// CHECK24-NEXT: [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
// CHECK24-NEXT: [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
// CHECK24-NEXT: store i16 [[CONV6]], i16* [[CONV]], align 4, !llvm.access.group !21
// CHECK24-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK24: omp.body.continue:
// CHECK24-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK24: omp.inner.for.inc:
// CHECK24-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
// CHECK24-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK24-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
// CHECK24-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]]
// CHECK24: omp.inner.for.end:
// CHECK24-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK24: omp.loop.exit:
// CHECK24-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK24-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK24-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
// CHECK24-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK24: .omp.final.then:
// CHECK24-NEXT: store i16 22, i16* [[IT]], align 2
// CHECK24-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK24: .omp.final.done:
// CHECK24-NEXT: ret void
//
//
// CHECK24-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140
// CHECK24-SAME: (i32 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], double* nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 4 dereferenceable(12) [[D:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] {
// CHECK24-NEXT: entry:
// CHECK24-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK24-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
// CHECK24-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK24-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4
// CHECK24-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
// CHECK24-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK24-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4
// CHECK24-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4
// CHECK24-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
// CHECK24-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK24-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK24-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK24-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK24-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
// CHECK24-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK24-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4
// CHECK24-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK24-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK24-NEXT: store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
// CHECK24-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4
// CHECK24-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
// CHECK24-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK24-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
// CHECK24-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK24-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
// CHECK24-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK24-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK24-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
// CHECK24-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
// CHECK24-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
// CHECK24-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK24-NEXT: store i32 [[TMP8]], i32* [[A_CASTED]], align 4
// CHECK24-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK24-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK24-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK24-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK24-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...)
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x float]*, i32, float*, [5 x [10 x double]]*, i32, i32, double*, %struct.TT*, i32)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP9]], [10 x float]* [[TMP0]], i32 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i32 [[TMP11]]) 23580 // CHECK24-NEXT: ret void 23581 // 23582 // 23583 // CHECK24-LABEL: define {{[^@]+}}@.omp_outlined..3 23584 // CHECK24-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], double* nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 4 dereferenceable(12) [[D:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] { 23585 // CHECK24-NEXT: entry: 23586 // CHECK24-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 23587 // CHECK24-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 23588 // CHECK24-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 23589 // CHECK24-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4 23590 // CHECK24-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 23591 // CHECK24-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4 23592 // CHECK24-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4 23593 // CHECK24-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4 23594 // CHECK24-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4 23595 // CHECK24-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4 23596 // CHECK24-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4 23597 // CHECK24-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 23598 // CHECK24-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 23599 // CHECK24-NEXT: [[TMP:%.*]] = alloca i8, align 1 23600 // CHECK24-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 23601 // CHECK24-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 23602 // CHECK24-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 23603 // CHECK24-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 23604 // CHECK24-NEXT: [[IT:%.*]] = alloca i8, align 1 23605 // CHECK24-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 23606 // CHECK24-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 23607 // CHECK24-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 23608 // CHECK24-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4 23609 // CHECK24-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 23610 // CHECK24-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4 23611 // CHECK24-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4 23612 // CHECK24-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4 23613 // CHECK24-NEXT: store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4 23614 // CHECK24-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4 23615 // CHECK24-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4 23616 // CHECK24-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 23617 // CHECK24-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4 23618 // CHECK24-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 23619 // CHECK24-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4 23620 // 
CHECK24-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4 23621 // CHECK24-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4 23622 // CHECK24-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4 23623 // CHECK24-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4 23624 // CHECK24-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4 23625 // CHECK24-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 23626 // CHECK24-NEXT: store i32 25, i32* [[DOTOMP_UB]], align 4 23627 // CHECK24-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 23628 // CHECK24-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 23629 // CHECK24-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 23630 // CHECK24-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 23631 // CHECK24-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4 23632 // CHECK24-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]]) 23633 // CHECK24-NEXT: br label [[OMP_DISPATCH_COND:%.*]] 23634 // CHECK24: omp.dispatch.cond: 23635 // CHECK24-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 23636 // CHECK24-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25 23637 // CHECK24-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 23638 // CHECK24: cond.true: 23639 // CHECK24-NEXT: br label [[COND_END:%.*]] 23640 // CHECK24: cond.false: 23641 // CHECK24-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 23642 // CHECK24-NEXT: br label [[COND_END]] 23643 // CHECK24: cond.end: 23644 // CHECK24-NEXT: [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] 23645 // CHECK24-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 23646 // CHECK24-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 23647 // CHECK24-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4 23648 // CHECK24-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 23649 // CHECK24-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 23650 // CHECK24-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] 23651 // CHECK24-NEXT: br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] 23652 // CHECK24: omp.dispatch.body: 23653 // CHECK24-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 23654 // CHECK24: omp.inner.for.cond: 23655 // CHECK24-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24 23656 // CHECK24-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !24 23657 // CHECK24-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]] 23658 // CHECK24-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 23659 // CHECK24: omp.inner.for.body: 23660 // CHECK24-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24 23661 // CHECK24-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1 23662 // CHECK24-NEXT: [[SUB:%.*]] = sub nsw i32 122, [[MUL]] 23663 // CHECK24-NEXT: [[CONV:%.*]] = trunc i32 [[SUB]] to i8 23664 // CHECK24-NEXT: store i8 [[CONV]], i8* [[IT]], align 1, !llvm.access.group !24 23665 // CHECK24-NEXT: [[TMP19:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !24 23666 // CHECK24-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1 23667 // CHECK24-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4, 
!llvm.access.group !24 23668 // CHECK24-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i32 0, i32 2 23669 // CHECK24-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !24 23670 // CHECK24-NEXT: [[CONV7:%.*]] = fpext float [[TMP20]] to double 23671 // CHECK24-NEXT: [[ADD8:%.*]] = fadd double [[CONV7]], 1.000000e+00 23672 // CHECK24-NEXT: [[CONV9:%.*]] = fptrunc double [[ADD8]] to float 23673 // CHECK24-NEXT: store float [[CONV9]], float* [[ARRAYIDX]], align 4, !llvm.access.group !24 23674 // CHECK24-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[TMP2]], i32 3 23675 // CHECK24-NEXT: [[TMP21:%.*]] = load float, float* [[ARRAYIDX10]], align 4, !llvm.access.group !24 23676 // CHECK24-NEXT: [[CONV11:%.*]] = fpext float [[TMP21]] to double 23677 // CHECK24-NEXT: [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00 23678 // CHECK24-NEXT: [[CONV13:%.*]] = fptrunc double [[ADD12]] to float 23679 // CHECK24-NEXT: store float [[CONV13]], float* [[ARRAYIDX10]], align 4, !llvm.access.group !24 23680 // CHECK24-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i32 0, i32 1 23681 // CHECK24-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX14]], i32 0, i32 2 23682 // CHECK24-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX15]], align 8, !llvm.access.group !24 23683 // CHECK24-NEXT: [[ADD16:%.*]] = fadd double [[TMP22]], 1.000000e+00 23684 // CHECK24-NEXT: store double [[ADD16]], double* [[ARRAYIDX15]], align 8, !llvm.access.group !24 23685 // CHECK24-NEXT: [[TMP23:%.*]] = mul nsw i32 1, [[TMP5]] 23686 // CHECK24-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds double, double* [[TMP6]], i32 [[TMP23]] 23687 // CHECK24-NEXT: [[ARRAYIDX18:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX17]], i32 3 23688 // CHECK24-NEXT: [[TMP24:%.*]] = load double, double* [[ARRAYIDX18]], align 8, !llvm.access.group !24 23689 // CHECK24-NEXT: [[ADD19:%.*]] = fadd double [[TMP24]], 1.000000e+00 23690 // CHECK24-NEXT: store double [[ADD19]], double* [[ARRAYIDX18]], align 8, !llvm.access.group !24 23691 // CHECK24-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0 23692 // CHECK24-NEXT: [[TMP25:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group !24 23693 // CHECK24-NEXT: [[ADD20:%.*]] = add nsw i64 [[TMP25]], 1 23694 // CHECK24-NEXT: store i64 [[ADD20]], i64* [[X]], align 4, !llvm.access.group !24 23695 // CHECK24-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1 23696 // CHECK24-NEXT: [[TMP26:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group !24 23697 // CHECK24-NEXT: [[CONV21:%.*]] = sext i8 [[TMP26]] to i32 23698 // CHECK24-NEXT: [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1 23699 // CHECK24-NEXT: [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8 23700 // CHECK24-NEXT: store i8 [[CONV23]], i8* [[Y]], align 4, !llvm.access.group !24 23701 // CHECK24-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 23702 // CHECK24: omp.body.continue: 23703 // CHECK24-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 23704 // CHECK24: omp.inner.for.inc: 23705 // CHECK24-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24 23706 // CHECK24-NEXT: [[ADD24:%.*]] = add nsw i32 [[TMP27]], 1 23707 // CHECK24-NEXT: store i32 [[ADD24]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24 23708 // CHECK24-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop 
[[LOOP25:![0-9]+]] 23709 // CHECK24: omp.inner.for.end: 23710 // CHECK24-NEXT: br label [[OMP_DISPATCH_INC:%.*]] 23711 // CHECK24: omp.dispatch.inc: 23712 // CHECK24-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 23713 // CHECK24-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 23714 // CHECK24-NEXT: [[ADD25:%.*]] = add nsw i32 [[TMP28]], [[TMP29]] 23715 // CHECK24-NEXT: store i32 [[ADD25]], i32* [[DOTOMP_LB]], align 4 23716 // CHECK24-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 23717 // CHECK24-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 23718 // CHECK24-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP30]], [[TMP31]] 23719 // CHECK24-NEXT: store i32 [[ADD26]], i32* [[DOTOMP_UB]], align 4 23720 // CHECK24-NEXT: br label [[OMP_DISPATCH_COND]] 23721 // CHECK24: omp.dispatch.end: 23722 // CHECK24-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]]) 23723 // CHECK24-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 23724 // CHECK24-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0 23725 // CHECK24-NEXT: br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 23726 // CHECK24: .omp.final.then: 23727 // CHECK24-NEXT: store i8 96, i8* [[IT]], align 1 23728 // CHECK24-NEXT: br label [[DOTOMP_FINAL_DONE]] 23729 // CHECK24: .omp.final.done: 23730 // CHECK24-NEXT: ret void 23731 // 23732 // 23733 // CHECK24-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195 23734 // CHECK24-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] { 23735 // CHECK24-NEXT: entry: 23736 // CHECK24-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 23737 // CHECK24-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 23738 // CHECK24-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4 23739 // CHECK24-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4 23740 // CHECK24-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 23741 // CHECK24-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 23742 // CHECK24-NEXT: [[AAA_CASTED:%.*]] = alloca i32, align 4 23743 // CHECK24-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 23744 // CHECK24-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 23745 // CHECK24-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4 23746 // CHECK24-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4 23747 // CHECK24-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* 23748 // CHECK24-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8* 23749 // CHECK24-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4 23750 // CHECK24-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4 23751 // CHECK24-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4 23752 // CHECK24-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4 23753 // CHECK24-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 4 23754 // CHECK24-NEXT: [[CONV2:%.*]] = bitcast i32* [[AA_CASTED]] to i16* 23755 // CHECK24-NEXT: store i16 [[TMP3]], i16* [[CONV2]], align 2 23756 // CHECK24-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4 23757 // CHECK24-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV1]], align 4 23758 // CHECK24-NEXT: [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8* 23759 // CHECK24-NEXT: store i8 [[TMP5]], i8* [[CONV3]], align 1 23760 // CHECK24-NEXT: [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4 23761 // CHECK24-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]]) 23762 // CHECK24-NEXT: ret void 23763 // 23764 // 23765 // CHECK24-LABEL: define {{[^@]+}}@.omp_outlined..4 23766 // CHECK24-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] { 23767 // CHECK24-NEXT: entry: 23768 // CHECK24-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 23769 // CHECK24-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 23770 // CHECK24-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 23771 // CHECK24-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 23772 // CHECK24-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4 23773 // CHECK24-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4 23774 // CHECK24-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 23775 // CHECK24-NEXT: [[TMP:%.*]] = alloca i32, align 4 23776 // CHECK24-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 23777 // CHECK24-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 23778 // CHECK24-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 23779 // CHECK24-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 23780 // CHECK24-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4 23781 // CHECK24-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4 23782 // CHECK24-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* 23783 // CHECK24-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8* 23784 // CHECK24-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4 23785 // CHECK24-NEXT: ret void 23786 // 23787 // 23788 // CHECK24-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214 23789 // CHECK24-SAME: (%struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] { 23790 // CHECK24-NEXT: entry: 23791 // CHECK24-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 23792 // CHECK24-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 23793 // CHECK24-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 23794 // CHECK24-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4 23795 // CHECK24-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4 23796 // CHECK24-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 23797 // CHECK24-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4 23798 // CHECK24-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 23799 // CHECK24-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4 23800 // CHECK24-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4 23801 // CHECK24-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]]) 23802 // CHECK24-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 23803 // CHECK24-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 23804 // CHECK24-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 23805 // CHECK24-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4 23806 // CHECK24-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4 23807 // CHECK24-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 23808 // CHECK24-NEXT: [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 23809 // CHECK24-NEXT: [[TMP2:%.*]] = load 
i32, i32* [[VLA_ADDR]], align 4 23810 // CHECK24-NEXT: [[TMP3:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4 23811 // CHECK24-NEXT: [[TMP4:%.*]] = load i16*, i16** [[C_ADDR]], align 4 23812 // CHECK24-NEXT: [[CONV:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__ADDR]] to i8* 23813 // CHECK24-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_ADDR]], align 4 23814 // CHECK24-NEXT: store i32 [[TMP5]], i32* [[B_CASTED]], align 4 23815 // CHECK24-NEXT: [[TMP6:%.*]] = load i32, i32* [[B_CASTED]], align 4 23816 // CHECK24-NEXT: [[TMP7:%.*]] = load i8, i8* [[CONV]], align 4 23817 // CHECK24-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP7]] to i1 23818 // CHECK24-NEXT: [[CONV3:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__CASTED]] to i8* 23819 // CHECK24-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 23820 // CHECK24-NEXT: store i8 [[FROMBOOL]], i8* [[CONV3]], align 1 23821 // CHECK24-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 23822 // CHECK24-NEXT: [[TMP9:%.*]] = load i8, i8* [[CONV]], align 4 23823 // CHECK24-NEXT: [[TOBOOL4:%.*]] = trunc i8 [[TMP9]] to i1 23824 // CHECK24-NEXT: br i1 [[TOBOOL4]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 23825 // CHECK24: omp_if.then: 23826 // CHECK24-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*, i32)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]], i32 [[TMP6]], i32 [[TMP2]], i32 [[TMP3]], i16* [[TMP4]], i32 [[TMP8]]) 23827 // CHECK24-NEXT: br label [[OMP_IF_END:%.*]] 23828 // CHECK24: omp_if.else: 23829 // CHECK24-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]]) 23830 // CHECK24-NEXT: store i32 [[TMP0]], i32* [[DOTTHREADID_TEMP_]], align 4 23831 // CHECK24-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4 23832 // CHECK24-NEXT: call void @.omp_outlined..5(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTBOUND_ZERO_ADDR]], %struct.S1* [[TMP1]], i32 [[TMP6]], i32 [[TMP2]], i32 [[TMP3]], i16* [[TMP4]], i32 [[TMP8]]) #[[ATTR2:[0-9]+]] 23833 // CHECK24-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]]) 23834 // CHECK24-NEXT: br label [[OMP_IF_END]] 23835 // CHECK24: omp_if.end: 23836 // CHECK24-NEXT: ret void 23837 // 23838 // 23839 // CHECK24-LABEL: define {{[^@]+}}@.omp_outlined..5 23840 // CHECK24-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] { 23841 // CHECK24-NEXT: entry: 23842 // CHECK24-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 23843 // CHECK24-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 23844 // CHECK24-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 23845 // CHECK24-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 23846 // CHECK24-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 23847 // CHECK24-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4 23848 // CHECK24-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4 23849 // CHECK24-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 23850 // CHECK24-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 23851 // CHECK24-NEXT: [[TMP:%.*]] = alloca i64, align 4 23852 // CHECK24-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 23853 // CHECK24-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 23854 // CHECK24-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, 
align 8 23855 // CHECK24-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 23856 // CHECK24-NEXT: [[IT:%.*]] = alloca i64, align 8 23857 // CHECK24-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 23858 // CHECK24-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 23859 // CHECK24-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 23860 // CHECK24-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 23861 // CHECK24-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 23862 // CHECK24-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4 23863 // CHECK24-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4 23864 // CHECK24-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 23865 // CHECK24-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 23866 // CHECK24-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 23867 // CHECK24-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4 23868 // CHECK24-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4 23869 // CHECK24-NEXT: [[CONV:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__ADDR]] to i8* 23870 // CHECK24-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 23871 // CHECK24-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8 23872 // CHECK24-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 23873 // CHECK24-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 23874 // CHECK24-NEXT: [[TMP4:%.*]] = load i8, i8* [[CONV]], align 4 23875 // CHECK24-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP4]] to i1 23876 // CHECK24-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 23877 // CHECK24: omp_if.then: 23878 // CHECK24-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 23879 // CHECK24-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4 23880 // CHECK24-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) 23881 // CHECK24-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 23882 // CHECK24-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP7]], 3 23883 // CHECK24-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 23884 // CHECK24: cond.true: 23885 // CHECK24-NEXT: br label [[COND_END:%.*]] 23886 // CHECK24: cond.false: 23887 // CHECK24-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 23888 // CHECK24-NEXT: br label [[COND_END]] 23889 // CHECK24: cond.end: 23890 // CHECK24-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ] 23891 // CHECK24-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 23892 // CHECK24-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 23893 // CHECK24-NEXT: store i64 [[TMP9]], i64* [[DOTOMP_IV]], align 8 23894 // CHECK24-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 23895 // CHECK24: omp.inner.for.cond: 23896 // CHECK24-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !27 23897 // CHECK24-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !27 23898 // CHECK24-NEXT: [[CMP3:%.*]] = icmp ule i64 [[TMP10]], [[TMP11]] 23899 // CHECK24-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 23900 // CHECK24: omp.inner.for.body: 23901 // CHECK24-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !27 23902 // CHECK24-NEXT: [[MUL:%.*]] = mul i64 [[TMP12]], 400 23903 // 
CHECK24-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] 23904 // CHECK24-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !27 23905 // CHECK24-NEXT: [[TMP13:%.*]] = load i32, i32* [[B_ADDR]], align 4, !llvm.access.group !27 23906 // CHECK24-NEXT: [[CONV4:%.*]] = sitofp i32 [[TMP13]] to double 23907 // CHECK24-NEXT: [[ADD:%.*]] = fadd double [[CONV4]], 1.500000e+00 23908 // CHECK24-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0 23909 // CHECK24-NEXT: store double [[ADD]], double* [[A]], align 4, !nontemporal !28, !llvm.access.group !27 23910 // CHECK24-NEXT: [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0 23911 // CHECK24-NEXT: [[TMP14:%.*]] = load double, double* [[A5]], align 4, !nontemporal !28, !llvm.access.group !27 23912 // CHECK24-NEXT: [[INC:%.*]] = fadd double [[TMP14]], 1.000000e+00 23913 // CHECK24-NEXT: store double [[INC]], double* [[A5]], align 4, !nontemporal !28, !llvm.access.group !27 23914 // CHECK24-NEXT: [[CONV6:%.*]] = fptosi double [[INC]] to i16 23915 // CHECK24-NEXT: [[TMP15:%.*]] = mul nsw i32 1, [[TMP2]] 23916 // CHECK24-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP15]] 23917 // CHECK24-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1 23918 // CHECK24-NEXT: store i16 [[CONV6]], i16* [[ARRAYIDX7]], align 2, !llvm.access.group !27 23919 // CHECK24-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 23920 // CHECK24: omp.body.continue: 23921 // CHECK24-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 23922 // CHECK24: omp.inner.for.inc: 23923 // CHECK24-NEXT: [[TMP16:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !27 23924 // CHECK24-NEXT: [[ADD8:%.*]] = add i64 [[TMP16]], 1 23925 // CHECK24-NEXT: store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !27 23926 // CHECK24-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]] 23927 // CHECK24: omp.inner.for.end: 23928 // CHECK24-NEXT: br label [[OMP_IF_END:%.*]] 23929 // CHECK24: omp_if.else: 23930 // CHECK24-NEXT: [[TMP17:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 23931 // CHECK24-NEXT: [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4 23932 // CHECK24-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP18]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) 23933 // CHECK24-NEXT: [[TMP19:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 23934 // CHECK24-NEXT: [[CMP9:%.*]] = icmp ugt i64 [[TMP19]], 3 23935 // CHECK24-NEXT: br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]] 23936 // CHECK24: cond.true10: 23937 // CHECK24-NEXT: br label [[COND_END12:%.*]] 23938 // CHECK24: cond.false11: 23939 // CHECK24-NEXT: [[TMP20:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 23940 // CHECK24-NEXT: br label [[COND_END12]] 23941 // CHECK24: cond.end12: 23942 // CHECK24-NEXT: [[COND13:%.*]] = phi i64 [ 3, [[COND_TRUE10]] ], [ [[TMP20]], [[COND_FALSE11]] ] 23943 // CHECK24-NEXT: store i64 [[COND13]], i64* [[DOTOMP_UB]], align 8 23944 // CHECK24-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 23945 // CHECK24-NEXT: store i64 [[TMP21]], i64* [[DOTOMP_IV]], align 8 23946 // CHECK24-NEXT: br label [[OMP_INNER_FOR_COND14:%.*]] 23947 // CHECK24: omp.inner.for.cond14: 23948 // CHECK24-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 23949 // CHECK24-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 23950 // 
CHECK24-NEXT: [[CMP15:%.*]] = icmp ule i64 [[TMP22]], [[TMP23]] 23951 // CHECK24-NEXT: br i1 [[CMP15]], label [[OMP_INNER_FOR_BODY16:%.*]], label [[OMP_INNER_FOR_END30:%.*]] 23952 // CHECK24: omp.inner.for.body16: 23953 // CHECK24-NEXT: [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 23954 // CHECK24-NEXT: [[MUL17:%.*]] = mul i64 [[TMP24]], 400 23955 // CHECK24-NEXT: [[SUB18:%.*]] = sub i64 2000, [[MUL17]] 23956 // CHECK24-NEXT: store i64 [[SUB18]], i64* [[IT]], align 8 23957 // CHECK24-NEXT: [[TMP25:%.*]] = load i32, i32* [[B_ADDR]], align 4 23958 // CHECK24-NEXT: [[CONV19:%.*]] = sitofp i32 [[TMP25]] to double 23959 // CHECK24-NEXT: [[ADD20:%.*]] = fadd double [[CONV19]], 1.500000e+00 23960 // CHECK24-NEXT: [[A21:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0 23961 // CHECK24-NEXT: store double [[ADD20]], double* [[A21]], align 4 23962 // CHECK24-NEXT: [[A22:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0 23963 // CHECK24-NEXT: [[TMP26:%.*]] = load double, double* [[A22]], align 4 23964 // CHECK24-NEXT: [[INC23:%.*]] = fadd double [[TMP26]], 1.000000e+00 23965 // CHECK24-NEXT: store double [[INC23]], double* [[A22]], align 4 23966 // CHECK24-NEXT: [[CONV24:%.*]] = fptosi double [[INC23]] to i16 23967 // CHECK24-NEXT: [[TMP27:%.*]] = mul nsw i32 1, [[TMP2]] 23968 // CHECK24-NEXT: [[ARRAYIDX25:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP27]] 23969 // CHECK24-NEXT: [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX25]], i32 1 23970 // CHECK24-NEXT: store i16 [[CONV24]], i16* [[ARRAYIDX26]], align 2 23971 // CHECK24-NEXT: br label [[OMP_BODY_CONTINUE27:%.*]] 23972 // CHECK24: omp.body.continue27: 23973 // CHECK24-NEXT: br label [[OMP_INNER_FOR_INC28:%.*]] 23974 // CHECK24: omp.inner.for.inc28: 23975 // CHECK24-NEXT: [[TMP28:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 23976 // CHECK24-NEXT: [[ADD29:%.*]] = add i64 [[TMP28]], 1 23977 // CHECK24-NEXT: store i64 [[ADD29]], i64* [[DOTOMP_IV]], align 8 23978 // CHECK24-NEXT: br label [[OMP_INNER_FOR_COND14]], !llvm.loop [[LOOP31:![0-9]+]] 23979 // CHECK24: omp.inner.for.end30: 23980 // CHECK24-NEXT: br label [[OMP_IF_END]] 23981 // CHECK24: omp_if.end: 23982 // CHECK24-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 23983 // CHECK24: omp.loop.exit: 23984 // CHECK24-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 23985 // CHECK24-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4 23986 // CHECK24-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]]) 23987 // CHECK24-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 23988 // CHECK24-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 23989 // CHECK24-NEXT: br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 23990 // CHECK24: .omp.final.then: 23991 // CHECK24-NEXT: store i64 400, i64* [[IT]], align 8 23992 // CHECK24-NEXT: br label [[DOTOMP_FINAL_DONE]] 23993 // CHECK24: .omp.final.done: 23994 // CHECK24-NEXT: ret void 23995 // 23996 // 23997 // CHECK24-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178 23998 // CHECK24-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] { 23999 // CHECK24-NEXT: entry: 24000 // CHECK24-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 24001 // CHECK24-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 24002 // CHECK24-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4 24003 // CHECK24-NEXT: 
[[A_CASTED:%.*]] = alloca i32, align 4 24004 // CHECK24-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 24005 // CHECK24-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 24006 // CHECK24-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 24007 // CHECK24-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4 24008 // CHECK24-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* 24009 // CHECK24-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4 24010 // CHECK24-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4 24011 // CHECK24-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4 24012 // CHECK24-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4 24013 // CHECK24-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 4 24014 // CHECK24-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* 24015 // CHECK24-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2 24016 // CHECK24-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4 24017 // CHECK24-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) 24018 // CHECK24-NEXT: ret void 24019 // 24020 // 24021 // CHECK24-LABEL: define {{[^@]+}}@.omp_outlined..6 24022 // CHECK24-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] { 24023 // CHECK24-NEXT: entry: 24024 // CHECK24-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 24025 // CHECK24-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 24026 // CHECK24-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 24027 // CHECK24-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 24028 // CHECK24-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4 24029 // CHECK24-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 24030 // CHECK24-NEXT: [[TMP:%.*]] = alloca i64, align 4 24031 // CHECK24-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 24032 // CHECK24-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 24033 // CHECK24-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 24034 // CHECK24-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 24035 // CHECK24-NEXT: [[I:%.*]] = alloca i64, align 8 24036 // CHECK24-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 24037 // CHECK24-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 24038 // CHECK24-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 24039 // CHECK24-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 24040 // CHECK24-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4 24041 // CHECK24-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* 24042 // CHECK24-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4 24043 // CHECK24-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 24044 // CHECK24-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8 24045 // CHECK24-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 24046 // CHECK24-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 24047 // CHECK24-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 24048 // CHECK24-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 24049 // CHECK24-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* 
[[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) 24050 // CHECK24-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 24051 // CHECK24-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6 24052 // CHECK24-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 24053 // CHECK24: cond.true: 24054 // CHECK24-NEXT: br label [[COND_END:%.*]] 24055 // CHECK24: cond.false: 24056 // CHECK24-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 24057 // CHECK24-NEXT: br label [[COND_END]] 24058 // CHECK24: cond.end: 24059 // CHECK24-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 24060 // CHECK24-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 24061 // CHECK24-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 24062 // CHECK24-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8 24063 // CHECK24-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 24064 // CHECK24: omp.inner.for.cond: 24065 // CHECK24-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !33 24066 // CHECK24-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !33 24067 // CHECK24-NEXT: [[CMP1:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]] 24068 // CHECK24-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 24069 // CHECK24: omp.inner.for.body: 24070 // CHECK24-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !33 24071 // CHECK24-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3 24072 // CHECK24-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]] 24073 // CHECK24-NEXT: store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !33 24074 // CHECK24-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !33 24075 // CHECK24-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1 24076 // CHECK24-NEXT: store i32 [[ADD2]], i32* [[A_ADDR]], align 4, !llvm.access.group !33 24077 // CHECK24-NEXT: [[TMP10:%.*]] = load i16, i16* [[CONV]], align 4, !llvm.access.group !33 24078 // CHECK24-NEXT: [[CONV3:%.*]] = sext i16 [[TMP10]] to i32 24079 // CHECK24-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1 24080 // CHECK24-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16 24081 // CHECK24-NEXT: store i16 [[CONV5]], i16* [[CONV]], align 4, !llvm.access.group !33 24082 // CHECK24-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2 24083 // CHECK24-NEXT: [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !33 24084 // CHECK24-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP11]], 1 24085 // CHECK24-NEXT: store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !33 24086 // CHECK24-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 24087 // CHECK24: omp.body.continue: 24088 // CHECK24-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 24089 // CHECK24: omp.inner.for.inc: 24090 // CHECK24-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !33 24091 // CHECK24-NEXT: [[ADD7:%.*]] = add nsw i64 [[TMP12]], 1 24092 // CHECK24-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !33 24093 // CHECK24-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]] 24094 // CHECK24: omp.inner.for.end: 24095 // CHECK24-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 24096 // CHECK24: omp.loop.exit: 24097 // CHECK24-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 24098 // CHECK24-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 24099 // CHECK24-NEXT: [[TMP14:%.*]] = icmp 
ne i32 [[TMP13]], 0 24100 // CHECK24-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 24101 // CHECK24: .omp.final.then: 24102 // CHECK24-NEXT: store i64 11, i64* [[I]], align 8 24103 // CHECK24-NEXT: br label [[DOTOMP_FINAL_DONE]] 24104 // CHECK24: .omp.final.done: 24105 // CHECK24-NEXT: ret void 24106 // 24107 // 24108 // CHECK25-LABEL: define {{[^@]+}}@_Z7get_valv 24109 // CHECK25-SAME: () #[[ATTR0:[0-9]+]] { 24110 // CHECK25-NEXT: entry: 24111 // CHECK25-NEXT: ret i64 0 24112 // 24113 // 24114 // CHECK25-LABEL: define {{[^@]+}}@_Z3fooi 24115 // CHECK25-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] { 24116 // CHECK25-NEXT: entry: 24117 // CHECK25-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 24118 // CHECK25-NEXT: [[A:%.*]] = alloca i32, align 4 24119 // CHECK25-NEXT: [[AA:%.*]] = alloca i16, align 2 24120 // CHECK25-NEXT: [[B:%.*]] = alloca [10 x float], align 4 24121 // CHECK25-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8 24122 // CHECK25-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8 24123 // CHECK25-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8 24124 // CHECK25-NEXT: [[__VLA_EXPR1:%.*]] = alloca i64, align 8 24125 // CHECK25-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8 24126 // CHECK25-NEXT: [[TMP:%.*]] = alloca i32, align 4 24127 // CHECK25-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 24128 // CHECK25-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 24129 // CHECK25-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 24130 // CHECK25-NEXT: [[I:%.*]] = alloca i32, align 4 24131 // CHECK25-NEXT: [[K:%.*]] = alloca i64, align 8 24132 // CHECK25-NEXT: [[_TMP3:%.*]] = alloca i32, align 4 24133 // CHECK25-NEXT: [[DOTOMP_LB4:%.*]] = alloca i32, align 4 24134 // CHECK25-NEXT: [[DOTOMP_UB5:%.*]] = alloca i32, align 4 24135 // CHECK25-NEXT: [[DOTOMP_IV6:%.*]] = alloca i32, align 4 24136 // CHECK25-NEXT: [[DOTLINEAR_START:%.*]] = alloca i64, align 8 24137 // CHECK25-NEXT: [[I7:%.*]] = alloca i32, align 4 24138 // CHECK25-NEXT: [[K8:%.*]] = alloca i64, align 8 24139 // CHECK25-NEXT: [[LIN:%.*]] = alloca i32, align 4 24140 // CHECK25-NEXT: [[_TMP20:%.*]] = alloca i64, align 8 24141 // CHECK25-NEXT: [[DOTOMP_LB21:%.*]] = alloca i64, align 8 24142 // CHECK25-NEXT: [[DOTOMP_UB22:%.*]] = alloca i64, align 8 24143 // CHECK25-NEXT: [[DOTOMP_IV23:%.*]] = alloca i64, align 8 24144 // CHECK25-NEXT: [[DOTLINEAR_START24:%.*]] = alloca i32, align 4 24145 // CHECK25-NEXT: [[DOTLINEAR_START25:%.*]] = alloca i32, align 4 24146 // CHECK25-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8 24147 // CHECK25-NEXT: [[IT:%.*]] = alloca i64, align 8 24148 // CHECK25-NEXT: [[LIN27:%.*]] = alloca i32, align 4 24149 // CHECK25-NEXT: [[A28:%.*]] = alloca i32, align 4 24150 // CHECK25-NEXT: [[_TMP49:%.*]] = alloca i16, align 2 24151 // CHECK25-NEXT: [[DOTOMP_LB50:%.*]] = alloca i32, align 4 24152 // CHECK25-NEXT: [[DOTOMP_UB51:%.*]] = alloca i32, align 4 24153 // CHECK25-NEXT: [[DOTOMP_IV52:%.*]] = alloca i32, align 4 24154 // CHECK25-NEXT: [[IT53:%.*]] = alloca i16, align 2 24155 // CHECK25-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 24156 // CHECK25-NEXT: [[_TMP68:%.*]] = alloca i8, align 1 24157 // CHECK25-NEXT: [[DOTOMP_LB69:%.*]] = alloca i32, align 4 24158 // CHECK25-NEXT: [[DOTOMP_UB70:%.*]] = alloca i32, align 4 24159 // CHECK25-NEXT: [[DOTOMP_IV71:%.*]] = alloca i32, align 4 24160 // CHECK25-NEXT: [[IT72:%.*]] = alloca i8, align 1 24161 // CHECK25-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 24162 // CHECK25-NEXT: store i32 0, i32* [[A]], align 4 24163 // 
CHECK25-NEXT: store i16 0, i16* [[AA]], align 2 24164 // CHECK25-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 24165 // CHECK25-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 24166 // CHECK25-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave() 24167 // CHECK25-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8 24168 // CHECK25-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4 24169 // CHECK25-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8 24170 // CHECK25-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4 24171 // CHECK25-NEXT: [[TMP4:%.*]] = zext i32 [[TMP3]] to i64 24172 // CHECK25-NEXT: [[TMP5:%.*]] = mul nuw i64 5, [[TMP4]] 24173 // CHECK25-NEXT: [[VLA1:%.*]] = alloca double, i64 [[TMP5]], align 8 24174 // CHECK25-NEXT: store i64 [[TMP4]], i64* [[__VLA_EXPR1]], align 8 24175 // CHECK25-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 24176 // CHECK25-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4 24177 // CHECK25-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 24178 // CHECK25-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 24179 // CHECK25-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 24180 // CHECK25: omp.inner.for.cond: 24181 // CHECK25-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 24182 // CHECK25-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !2 24183 // CHECK25-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 24184 // CHECK25-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 24185 // CHECK25: omp.inner.for.body: 24186 // CHECK25-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 24187 // CHECK25-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 5 24188 // CHECK25-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]] 24189 // CHECK25-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !2 24190 // CHECK25-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 24191 // CHECK25: omp.body.continue: 24192 // CHECK25-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 24193 // CHECK25: omp.inner.for.inc: 24194 // CHECK25-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 24195 // CHECK25-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], 1 24196 // CHECK25-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 24197 // CHECK25-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] 24198 // CHECK25: omp.inner.for.end: 24199 // CHECK25-NEXT: store i32 33, i32* [[I]], align 4 24200 // CHECK25-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv() 24201 // CHECK25-NEXT: store i64 [[CALL]], i64* [[K]], align 8 24202 // CHECK25-NEXT: store i32 0, i32* [[DOTOMP_LB4]], align 4 24203 // CHECK25-NEXT: store i32 8, i32* [[DOTOMP_UB5]], align 4 24204 // CHECK25-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB4]], align 4 24205 // CHECK25-NEXT: store i32 [[TMP11]], i32* [[DOTOMP_IV6]], align 4 24206 // CHECK25-NEXT: [[TMP12:%.*]] = load i64, i64* [[K]], align 8 24207 // CHECK25-NEXT: store i64 [[TMP12]], i64* [[DOTLINEAR_START]], align 8 24208 // CHECK25-NEXT: br label [[OMP_INNER_FOR_COND9:%.*]] 24209 // CHECK25: omp.inner.for.cond9: 24210 // CHECK25-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6 24211 // CHECK25-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB5]], align 4, !llvm.access.group !6 24212 // CHECK25-NEXT: [[CMP10:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]] 24213 // CHECK25-NEXT: br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label 
[[OMP_INNER_FOR_END19:%.*]]
// CHECK25: omp.inner.for.body11:
// CHECK25-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
// CHECK25-NEXT: [[MUL12:%.*]] = mul nsw i32 [[TMP15]], 1
// CHECK25-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[MUL12]]
// CHECK25-NEXT: store i32 [[SUB]], i32* [[I7]], align 4, !llvm.access.group !6
// CHECK25-NEXT: [[TMP16:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !6
// CHECK25-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
// CHECK25-NEXT: [[MUL13:%.*]] = mul nsw i32 [[TMP17]], 3
// CHECK25-NEXT: [[CONV:%.*]] = sext i32 [[MUL13]] to i64
// CHECK25-NEXT: [[ADD14:%.*]] = add nsw i64 [[TMP16]], [[CONV]]
// CHECK25-NEXT: store i64 [[ADD14]], i64* [[K8]], align 8, !llvm.access.group !6
// CHECK25-NEXT: [[TMP18:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !6
// CHECK25-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP18]], 1
// CHECK25-NEXT: store i32 [[ADD15]], i32* [[A]], align 4, !llvm.access.group !6
// CHECK25-NEXT: br label [[OMP_BODY_CONTINUE16:%.*]]
// CHECK25: omp.body.continue16:
// CHECK25-NEXT: br label [[OMP_INNER_FOR_INC17:%.*]]
// CHECK25: omp.inner.for.inc17:
// CHECK25-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
// CHECK25-NEXT: [[ADD18:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK25-NEXT: store i32 [[ADD18]], i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
// CHECK25-NEXT: br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP7:![0-9]+]]
// CHECK25: omp.inner.for.end19:
// CHECK25-NEXT: store i32 1, i32* [[I7]], align 4
// CHECK25-NEXT: [[TMP20:%.*]] = load i64, i64* [[K8]], align 8
// CHECK25-NEXT: store i64 [[TMP20]], i64* [[K]], align 8
// CHECK25-NEXT: store i32 12, i32* [[LIN]], align 4
// CHECK25-NEXT: store i64 0, i64* [[DOTOMP_LB21]], align 8
// CHECK25-NEXT: store i64 3, i64* [[DOTOMP_UB22]], align 8
// CHECK25-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTOMP_LB21]], align 8
// CHECK25-NEXT: store i64 [[TMP21]], i64* [[DOTOMP_IV23]], align 8
// CHECK25-NEXT: [[TMP22:%.*]] = load i32, i32* [[LIN]], align 4
// CHECK25-NEXT: store i32 [[TMP22]], i32* [[DOTLINEAR_START24]], align 4
// CHECK25-NEXT: [[TMP23:%.*]] = load i32, i32* [[A]], align 4
// CHECK25-NEXT: store i32 [[TMP23]], i32* [[DOTLINEAR_START25]], align 4
// CHECK25-NEXT: [[CALL26:%.*]] = call i64 @_Z7get_valv()
// CHECK25-NEXT: store i64 [[CALL26]], i64* [[DOTLINEAR_STEP]], align 8
// CHECK25-NEXT: br label [[OMP_INNER_FOR_COND29:%.*]]
// CHECK25: omp.inner.for.cond29:
// CHECK25-NEXT: [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK25-NEXT: [[TMP25:%.*]] = load i64, i64* [[DOTOMP_UB22]], align 8, !llvm.access.group !9
// CHECK25-NEXT: [[CMP30:%.*]] = icmp ule i64 [[TMP24]], [[TMP25]]
// CHECK25-NEXT: br i1 [[CMP30]], label [[OMP_INNER_FOR_BODY31:%.*]], label [[OMP_INNER_FOR_END48:%.*]]
// CHECK25: omp.inner.for.body31:
// CHECK25-NEXT: [[TMP26:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK25-NEXT: [[MUL32:%.*]] = mul i64 [[TMP26]], 400
// CHECK25-NEXT: [[SUB33:%.*]] = sub i64 2000, [[MUL32]]
// CHECK25-NEXT: store i64 [[SUB33]], i64* [[IT]], align 8, !llvm.access.group !9
// CHECK25-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTLINEAR_START24]], align 4, !llvm.access.group !9
// CHECK25-NEXT: [[CONV34:%.*]] = sext i32 [[TMP27]] to i64
// CHECK25-NEXT: [[TMP28:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK25-NEXT: [[TMP29:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !9
// CHECK25-NEXT: [[MUL35:%.*]] = mul i64 [[TMP28]], [[TMP29]]
// CHECK25-NEXT: [[ADD36:%.*]] = add i64 [[CONV34]], [[MUL35]]
// CHECK25-NEXT: [[CONV37:%.*]] = trunc i64 [[ADD36]] to i32
// CHECK25-NEXT: store i32 [[CONV37]], i32* [[LIN27]], align 4, !llvm.access.group !9
// CHECK25-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTLINEAR_START25]], align 4, !llvm.access.group !9
// CHECK25-NEXT: [[CONV38:%.*]] = sext i32 [[TMP30]] to i64
// CHECK25-NEXT: [[TMP31:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK25-NEXT: [[TMP32:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !9
// CHECK25-NEXT: [[MUL39:%.*]] = mul i64 [[TMP31]], [[TMP32]]
// CHECK25-NEXT: [[ADD40:%.*]] = add i64 [[CONV38]], [[MUL39]]
// CHECK25-NEXT: [[CONV41:%.*]] = trunc i64 [[ADD40]] to i32
// CHECK25-NEXT: store i32 [[CONV41]], i32* [[A28]], align 4, !llvm.access.group !9
// CHECK25-NEXT: [[TMP33:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !9
// CHECK25-NEXT: [[CONV42:%.*]] = sext i16 [[TMP33]] to i32
// CHECK25-NEXT: [[ADD43:%.*]] = add nsw i32 [[CONV42]], 1
// CHECK25-NEXT: [[CONV44:%.*]] = trunc i32 [[ADD43]] to i16
// CHECK25-NEXT: store i16 [[CONV44]], i16* [[AA]], align 2, !llvm.access.group !9
// CHECK25-NEXT: br label [[OMP_BODY_CONTINUE45:%.*]]
// CHECK25: omp.body.continue45:
// CHECK25-NEXT: br label [[OMP_INNER_FOR_INC46:%.*]]
// CHECK25: omp.inner.for.inc46:
// CHECK25-NEXT: [[TMP34:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK25-NEXT: [[ADD47:%.*]] = add i64 [[TMP34]], 1
// CHECK25-NEXT: store i64 [[ADD47]], i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK25-NEXT: br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP10:![0-9]+]]
// CHECK25: omp.inner.for.end48:
// CHECK25-NEXT: store i64 400, i64* [[IT]], align 8
// CHECK25-NEXT: [[TMP35:%.*]] = load i32, i32* [[LIN27]], align 4
// CHECK25-NEXT: store i32 [[TMP35]], i32* [[LIN]], align 4
// CHECK25-NEXT: [[TMP36:%.*]] = load i32, i32* [[A28]], align 4
// CHECK25-NEXT: store i32 [[TMP36]], i32* [[A]], align 4
// CHECK25-NEXT: store i32 0, i32* [[DOTOMP_LB50]], align 4
// CHECK25-NEXT: store i32 3, i32* [[DOTOMP_UB51]], align 4
// CHECK25-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTOMP_LB50]], align 4
// CHECK25-NEXT: store i32 [[TMP37]], i32* [[DOTOMP_IV52]], align 4
// CHECK25-NEXT: br label [[OMP_INNER_FOR_COND54:%.*]]
// CHECK25: omp.inner.for.cond54:
// CHECK25-NEXT: [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
// CHECK25-NEXT: [[TMP39:%.*]] = load i32, i32* [[DOTOMP_UB51]], align 4, !llvm.access.group !12
// CHECK25-NEXT: [[CMP55:%.*]] = icmp sle i32 [[TMP38]], [[TMP39]]
// CHECK25-NEXT: br i1 [[CMP55]], label [[OMP_INNER_FOR_BODY56:%.*]], label [[OMP_INNER_FOR_END67:%.*]]
// CHECK25: omp.inner.for.body56:
// CHECK25-NEXT: [[TMP40:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
// CHECK25-NEXT: [[MUL57:%.*]] = mul nsw i32 [[TMP40]], 4
// CHECK25-NEXT: [[ADD58:%.*]] = add nsw i32 6, [[MUL57]]
// CHECK25-NEXT: [[CONV59:%.*]] = trunc i32 [[ADD58]] to i16
// CHECK25-NEXT: store i16 [[CONV59]], i16* [[IT53]], align 2, !llvm.access.group !12
// CHECK25-NEXT: [[TMP41:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !12
// CHECK25-NEXT: [[ADD60:%.*]] = add nsw i32 [[TMP41]], 1
// CHECK25-NEXT: store i32 [[ADD60]], i32* [[A]], align 4, !llvm.access.group !12
// CHECK25-NEXT: [[TMP42:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !12
// CHECK25-NEXT: [[CONV61:%.*]] = sext i16 [[TMP42]] to i32
// CHECK25-NEXT: [[ADD62:%.*]] = add nsw i32 [[CONV61]], 1
// CHECK25-NEXT: [[CONV63:%.*]] = trunc i32 [[ADD62]] to i16
// CHECK25-NEXT: store i16 [[CONV63]], i16* [[AA]], align 2, !llvm.access.group !12
// CHECK25-NEXT: br label [[OMP_BODY_CONTINUE64:%.*]]
// CHECK25: omp.body.continue64:
// CHECK25-NEXT: br label [[OMP_INNER_FOR_INC65:%.*]]
// CHECK25: omp.inner.for.inc65:
// CHECK25-NEXT: [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
// CHECK25-NEXT: [[ADD66:%.*]] = add nsw i32 [[TMP43]], 1
// CHECK25-NEXT: store i32 [[ADD66]], i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
// CHECK25-NEXT: br label [[OMP_INNER_FOR_COND54]], !llvm.loop [[LOOP13:![0-9]+]]
// CHECK25: omp.inner.for.end67:
// CHECK25-NEXT: store i16 22, i16* [[IT53]], align 2
// CHECK25-NEXT: [[TMP44:%.*]] = load i32, i32* [[A]], align 4
// CHECK25-NEXT: store i32 [[TMP44]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK25-NEXT: store i32 0, i32* [[DOTOMP_LB69]], align 4
// CHECK25-NEXT: store i32 25, i32* [[DOTOMP_UB70]], align 4
// CHECK25-NEXT: [[TMP45:%.*]] = load i32, i32* [[DOTOMP_LB69]], align 4
// CHECK25-NEXT: store i32 [[TMP45]], i32* [[DOTOMP_IV71]], align 4
// CHECK25-NEXT: br label [[OMP_INNER_FOR_COND73:%.*]]
// CHECK25: omp.inner.for.cond73:
// CHECK25-NEXT: [[TMP46:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15
// CHECK25-NEXT: [[TMP47:%.*]] = load i32, i32* [[DOTOMP_UB70]], align 4, !llvm.access.group !15
// CHECK25-NEXT: [[CMP74:%.*]] = icmp sle i32 [[TMP46]], [[TMP47]]
// CHECK25-NEXT: br i1 [[CMP74]], label [[OMP_INNER_FOR_BODY75:%.*]], label [[OMP_INNER_FOR_END100:%.*]]
// CHECK25: omp.inner.for.body75:
// CHECK25-NEXT: [[TMP48:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15
// CHECK25-NEXT: [[MUL76:%.*]] = mul nsw i32 [[TMP48]], 1
// CHECK25-NEXT: [[SUB77:%.*]] = sub nsw i32 122, [[MUL76]]
// CHECK25-NEXT: [[CONV78:%.*]] = trunc i32 [[SUB77]] to i8
// CHECK25-NEXT: store i8 [[CONV78]], i8* [[IT72]], align 1, !llvm.access.group !15
// CHECK25-NEXT: [[TMP49:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !15
// CHECK25-NEXT: [[ADD79:%.*]] = add nsw i32 [[TMP49]], 1
// CHECK25-NEXT: store i32 [[ADD79]], i32* [[A]], align 4, !llvm.access.group !15
// CHECK25-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i64 0, i64 2
// CHECK25-NEXT: [[TMP50:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !15
// CHECK25-NEXT: [[CONV80:%.*]] = fpext float [[TMP50]] to double
// CHECK25-NEXT: [[ADD81:%.*]] = fadd double [[CONV80]], 1.000000e+00
// CHECK25-NEXT: [[CONV82:%.*]] = fptrunc double [[ADD81]] to float
// CHECK25-NEXT: store float [[CONV82]], float* [[ARRAYIDX]], align 4, !llvm.access.group !15
// CHECK25-NEXT: [[ARRAYIDX83:%.*]] = getelementptr inbounds float, float* [[VLA]], i64 3
// CHECK25-NEXT: [[TMP51:%.*]] = load float, float* [[ARRAYIDX83]], align 4, !llvm.access.group !15
// CHECK25-NEXT: [[CONV84:%.*]] = fpext float [[TMP51]] to double
// CHECK25-NEXT: [[ADD85:%.*]] = fadd double [[CONV84]], 1.000000e+00
// CHECK25-NEXT: [[CONV86:%.*]] = fptrunc double [[ADD85]] to float
// CHECK25-NEXT: store float [[CONV86]], float* [[ARRAYIDX83]], align 4, !llvm.access.group !15
// CHECK25-NEXT: [[ARRAYIDX87:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i64 0, i64 1
// CHECK25-NEXT: [[ARRAYIDX88:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX87]], i64 0, i64 2
// CHECK25-NEXT: [[TMP52:%.*]] = load double, double* [[ARRAYIDX88]], align 8, !llvm.access.group !15
// CHECK25-NEXT: [[ADD89:%.*]] = fadd double [[TMP52]], 1.000000e+00
// CHECK25-NEXT: store double [[ADD89]], double* [[ARRAYIDX88]], align 8, !llvm.access.group !15
// CHECK25-NEXT: [[TMP53:%.*]] = mul nsw i64 1, [[TMP4]]
// CHECK25-NEXT: [[ARRAYIDX90:%.*]] = getelementptr inbounds double, double* [[VLA1]], i64 [[TMP53]]
// CHECK25-NEXT: [[ARRAYIDX91:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX90]], i64 3
// CHECK25-NEXT: [[TMP54:%.*]] = load double, double* [[ARRAYIDX91]], align 8, !llvm.access.group !15
// CHECK25-NEXT: [[ADD92:%.*]] = fadd double [[TMP54]], 1.000000e+00
// CHECK25-NEXT: store double [[ADD92]], double* [[ARRAYIDX91]], align 8, !llvm.access.group !15
// CHECK25-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
// CHECK25-NEXT: [[TMP55:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group !15
// CHECK25-NEXT: [[ADD93:%.*]] = add nsw i64 [[TMP55]], 1
// CHECK25-NEXT: store i64 [[ADD93]], i64* [[X]], align 8, !llvm.access.group !15
// CHECK25-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
// CHECK25-NEXT: [[TMP56:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group !15
// CHECK25-NEXT: [[CONV94:%.*]] = sext i8 [[TMP56]] to i32
// CHECK25-NEXT: [[ADD95:%.*]] = add nsw i32 [[CONV94]], 1
// CHECK25-NEXT: [[CONV96:%.*]] = trunc i32 [[ADD95]] to i8
// CHECK25-NEXT: store i8 [[CONV96]], i8* [[Y]], align 8, !llvm.access.group !15
// CHECK25-NEXT: br label [[OMP_BODY_CONTINUE97:%.*]]
// CHECK25: omp.body.continue97:
// CHECK25-NEXT: br label [[OMP_INNER_FOR_INC98:%.*]]
// CHECK25: omp.inner.for.inc98:
// CHECK25-NEXT: [[TMP57:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15
// CHECK25-NEXT: [[ADD99:%.*]] = add nsw i32 [[TMP57]], 1
// CHECK25-NEXT: store i32 [[ADD99]], i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15
// CHECK25-NEXT: br label [[OMP_INNER_FOR_COND73]], !llvm.loop [[LOOP16:![0-9]+]]
// CHECK25: omp.inner.for.end100:
// CHECK25-NEXT: store i8 96, i8* [[IT72]], align 1
// CHECK25-NEXT: [[TMP58:%.*]] = load i32, i32* [[A]], align 4
// CHECK25-NEXT: [[TMP59:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK25-NEXT: call void @llvm.stackrestore(i8* [[TMP59]])
// CHECK25-NEXT: ret i32 [[TMP58]]
//
//
// CHECK25-LABEL: define {{[^@]+}}@_Z3bari
// CHECK25-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] {
// CHECK25-NEXT: entry:
// CHECK25-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK25-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK25-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8
// CHECK25-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK25-NEXT: store i32 0, i32* [[A]], align 4
// CHECK25-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK25-NEXT: [[CALL:%.*]] = call signext i32 @_Z3fooi(i32 signext [[TMP0]])
// CHECK25-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK25-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
// CHECK25-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK25-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK25-NEXT: [[CALL1:%.*]] = call signext i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 8 dereferenceable(8) [[S]], i32 signext [[TMP2]])
// CHECK25-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK25-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
// CHECK25-NEXT: store i32 [[ADD2]], i32* [[A]], align 4
// CHECK25-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK25-NEXT: [[CALL3:%.*]] = call signext i32 @_ZL7fstatici(i32 signext [[TMP4]])
// CHECK25-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4
// CHECK25-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
// CHECK25-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK25-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK25-NEXT: [[CALL5:%.*]] = call signext i32 @_Z9ftemplateIiET_i(i32 signext [[TMP6]])
// CHECK25-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK25-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
// CHECK25-NEXT: store i32 [[ADD6]], i32* [[A]], align 4
// CHECK25-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK25-NEXT: ret i32 [[TMP8]]
//
//
// CHECK25-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
// CHECK25-SAME: (%struct.S1* nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 signext [[N:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK25-NEXT: entry:
// CHECK25-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK25-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK25-NEXT: [[B:%.*]] = alloca i32, align 4
// CHECK25-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK25-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK25-NEXT: [[TMP:%.*]] = alloca i64, align 8
// CHECK25-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK25-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK25-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK25-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK25-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK25-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK25-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK25-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK25-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK25-NEXT: store i32 [[ADD]], i32* [[B]], align 4
// CHECK25-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK25-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK25-NEXT: [[TMP3:%.*]] = call i8* @llvm.stacksave()
// CHECK25-NEXT: store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
// CHECK25-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]]
// CHECK25-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2
// CHECK25-NEXT: store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
// CHECK25-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK25-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK25-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK25-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8
// CHECK25-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK25: omp.inner.for.cond:
// CHECK25-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK25-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !18
// CHECK25-NEXT: [[CMP:%.*]] = icmp ule i64 [[TMP6]], [[TMP7]]
// CHECK25-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK25: omp.inner.for.body:
// CHECK25-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK25-NEXT: [[MUL:%.*]] = mul i64 [[TMP8]], 400
// CHECK25-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK25-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !18
// CHECK25-NEXT: [[TMP9:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group !18
// CHECK25-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP9]] to double
// CHECK25-NEXT: [[ADD2:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK25-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK25-NEXT: store double [[ADD2]], double* [[A]], align 8, !llvm.access.group !18
// CHECK25-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK25-NEXT: [[TMP10:%.*]] = load double, double* [[A3]], align 8, !llvm.access.group !18
// CHECK25-NEXT: [[INC:%.*]] = fadd double [[TMP10]], 1.000000e+00
// CHECK25-NEXT: store double [[INC]], double* [[A3]], align 8, !llvm.access.group !18
// CHECK25-NEXT: [[CONV4:%.*]] = fptosi double [[INC]] to i16
// CHECK25-NEXT: [[TMP11:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK25-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP11]]
// CHECK25-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
// CHECK25-NEXT: store i16 [[CONV4]], i16* [[ARRAYIDX5]], align 2, !llvm.access.group !18
// CHECK25-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK25: omp.body.continue:
// CHECK25-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK25: omp.inner.for.inc:
// CHECK25-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK25-NEXT: [[ADD6:%.*]] = add i64 [[TMP12]], 1
// CHECK25-NEXT: store i64 [[ADD6]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK25-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
// CHECK25: omp.inner.for.end:
// CHECK25-NEXT: store i64 400, i64* [[IT]], align 8
// CHECK25-NEXT: [[TMP13:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK25-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP13]]
// CHECK25-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX7]], i64 1
// CHECK25-NEXT: [[TMP14:%.*]] = load i16, i16* [[ARRAYIDX8]], align 2
// CHECK25-NEXT: [[CONV9:%.*]] = sext i16 [[TMP14]] to i32
// CHECK25-NEXT: [[TMP15:%.*]] = load i32, i32* [[B]], align 4
// CHECK25-NEXT: [[ADD10:%.*]] = add nsw i32 [[CONV9]], [[TMP15]]
// CHECK25-NEXT: [[TMP16:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK25-NEXT: call void @llvm.stackrestore(i8* [[TMP16]])
// CHECK25-NEXT: ret i32 [[ADD10]]
//
//
// CHECK25-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK25-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] {
// CHECK25-NEXT: entry:
// CHECK25-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK25-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK25-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK25-NEXT: [[AAA:%.*]] = alloca i8, align 1
// CHECK25-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK25-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK25-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK25-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK25-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK25-NEXT: store i32 0, i32* [[A]], align 4
// CHECK25-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK25-NEXT: store i8 0, i8* [[AAA]], align 1
// CHECK25-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK25-NEXT: store i32 429496720, i32* [[DOTOMP_UB]], align 4
// CHECK25-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK25-NEXT: ret i32 [[TMP0]]
//
//
// CHECK25-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK25-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK25-NEXT: entry:
// CHECK25-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK25-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK25-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK25-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK25-NEXT: [[TMP:%.*]] = alloca i64, align 8
// CHECK25-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK25-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK25-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK25-NEXT: [[I:%.*]] = alloca i64, align 8
// CHECK25-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK25-NEXT: store i32 0, i32* [[A]], align 4
// CHECK25-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK25-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK25-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8
// CHECK25-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK25-NEXT: store i64 [[TMP0]], i64* [[DOTOMP_IV]], align 8
// CHECK25-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK25: omp.inner.for.cond:
// CHECK25-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !21
// CHECK25-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !21
// CHECK25-NEXT: [[CMP:%.*]] = icmp sle i64 [[TMP1]], [[TMP2]]
// CHECK25-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK25: omp.inner.for.body:
// CHECK25-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !21
// CHECK25-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP3]], 3
// CHECK25-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
// CHECK25-NEXT: store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !21
// CHECK25-NEXT: [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !21
// CHECK25-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK25-NEXT: store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group !21
// CHECK25-NEXT: [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !21
// CHECK25-NEXT: [[CONV:%.*]] = sext i16 [[TMP5]] to i32
// CHECK25-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
// CHECK25-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
// CHECK25-NEXT: store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group !21
// CHECK25-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i64 0, i64 2
// CHECK25-NEXT: [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !21
// CHECK25-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK25-NEXT: store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !21
// CHECK25-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK25: omp.body.continue:
// CHECK25-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK25: omp.inner.for.inc:
// CHECK25-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !21
// CHECK25-NEXT: [[ADD5:%.*]] = add nsw i64 [[TMP7]], 1
// CHECK25-NEXT: store i64 [[ADD5]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !21
// CHECK25-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]]
// CHECK25: omp.inner.for.end:
// CHECK25-NEXT: store i64 11, i64* [[I]], align 8
// CHECK25-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK25-NEXT: ret i32 [[TMP8]]
//
//
// CHECK26-LABEL: define {{[^@]+}}@_Z7get_valv
// CHECK26-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK26-NEXT: entry:
// CHECK26-NEXT: ret i64 0
//
//
// CHECK26-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK26-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] {
// CHECK26-NEXT: entry:
// CHECK26-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK26-NEXT: [[B:%.*]] = alloca [10 x float], align 4
// CHECK26-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK26-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK26-NEXT: [[__VLA_EXPR1:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8
// CHECK26-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[K:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[_TMP3:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[DOTOMP_LB4:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[DOTOMP_UB5:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[DOTOMP_IV6:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[DOTLINEAR_START:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[I7:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[K8:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[LIN:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[_TMP20:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[DOTOMP_LB21:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[DOTOMP_UB22:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[DOTOMP_IV23:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[DOTLINEAR_START24:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[DOTLINEAR_START25:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[LIN27:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[A28:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[_TMP49:%.*]] = alloca i16, align 2
// CHECK26-NEXT: [[DOTOMP_LB50:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[DOTOMP_UB51:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[DOTOMP_IV52:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[IT53:%.*]] = alloca i16, align 2
// CHECK26-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[_TMP68:%.*]] = alloca i8, align 1
// CHECK26-NEXT: [[DOTOMP_LB69:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[DOTOMP_UB70:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[DOTOMP_IV71:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[IT72:%.*]] = alloca i8, align 1
// CHECK26-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK26-NEXT: store i32 0, i32* [[A]], align 4
// CHECK26-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK26-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK26-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
// CHECK26-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK26-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
// CHECK26-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4
// CHECK26-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
// CHECK26-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK26-NEXT: [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
// CHECK26-NEXT: [[TMP5:%.*]] = mul nuw i64 5, [[TMP4]]
// CHECK26-NEXT: [[VLA1:%.*]] = alloca double, i64 [[TMP5]], align 8
// CHECK26-NEXT: store i64 [[TMP4]], i64* [[__VLA_EXPR1]], align 8
// CHECK26-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK26-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4
// CHECK26-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK26-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4
// CHECK26-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK26: omp.inner.for.cond:
// CHECK26-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
// CHECK26-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !2
// CHECK26-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
// CHECK26-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK26: omp.inner.for.body:
// CHECK26-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
// CHECK26-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 5
// CHECK26-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]]
// CHECK26-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !2
// CHECK26-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK26: omp.body.continue:
// CHECK26-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK26: omp.inner.for.inc:
// CHECK26-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
// CHECK26-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK26-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
// CHECK26-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
// CHECK26: omp.inner.for.end:
// CHECK26-NEXT: store i32 33, i32* [[I]], align 4
// CHECK26-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv()
// CHECK26-NEXT: store i64 [[CALL]], i64* [[K]], align 8
// CHECK26-NEXT: store i32 0, i32* [[DOTOMP_LB4]], align 4
// CHECK26-NEXT: store i32 8, i32* [[DOTOMP_UB5]], align 4
// CHECK26-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB4]], align 4
// CHECK26-NEXT: store i32 [[TMP11]], i32* [[DOTOMP_IV6]], align 4
// CHECK26-NEXT: [[TMP12:%.*]] = load i64, i64* [[K]], align 8
// CHECK26-NEXT: store i64 [[TMP12]], i64* [[DOTLINEAR_START]], align 8
// CHECK26-NEXT: br label [[OMP_INNER_FOR_COND9:%.*]]
// CHECK26: omp.inner.for.cond9:
// CHECK26-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
// CHECK26-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB5]], align 4, !llvm.access.group !6
// CHECK26-NEXT: [[CMP10:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK26-NEXT: br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END19:%.*]]
// CHECK26: omp.inner.for.body11:
// CHECK26-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
// CHECK26-NEXT: [[MUL12:%.*]] = mul nsw i32 [[TMP15]], 1
// CHECK26-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[MUL12]]
// CHECK26-NEXT: store i32 [[SUB]], i32* [[I7]], align 4, !llvm.access.group !6
// CHECK26-NEXT: [[TMP16:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !6
// CHECK26-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
// CHECK26-NEXT: [[MUL13:%.*]] = mul nsw i32 [[TMP17]], 3
// CHECK26-NEXT: [[CONV:%.*]] = sext i32 [[MUL13]] to i64
// CHECK26-NEXT: [[ADD14:%.*]] = add nsw i64 [[TMP16]], [[CONV]]
// CHECK26-NEXT: store i64 [[ADD14]], i64* [[K8]], align 8, !llvm.access.group !6
// CHECK26-NEXT: [[TMP18:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !6
// CHECK26-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP18]], 1
// CHECK26-NEXT: store i32 [[ADD15]], i32* [[A]], align 4, !llvm.access.group !6
// CHECK26-NEXT: br label [[OMP_BODY_CONTINUE16:%.*]]
// CHECK26: omp.body.continue16:
// CHECK26-NEXT: br label [[OMP_INNER_FOR_INC17:%.*]]
// CHECK26: omp.inner.for.inc17:
// CHECK26-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
// CHECK26-NEXT: [[ADD18:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK26-NEXT: store i32 [[ADD18]], i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
// CHECK26-NEXT: br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP7:![0-9]+]]
// CHECK26: omp.inner.for.end19:
// CHECK26-NEXT: store i32 1, i32* [[I7]], align 4
// CHECK26-NEXT: [[TMP20:%.*]] = load i64, i64* [[K8]], align 8
// CHECK26-NEXT: store i64 [[TMP20]], i64* [[K]], align 8
// CHECK26-NEXT: store i32 12, i32* [[LIN]], align 4
// CHECK26-NEXT: store i64 0, i64* [[DOTOMP_LB21]], align 8
// CHECK26-NEXT: store i64 3, i64* [[DOTOMP_UB22]], align 8
// CHECK26-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTOMP_LB21]], align 8
// CHECK26-NEXT: store i64 [[TMP21]], i64* [[DOTOMP_IV23]], align 8
// CHECK26-NEXT: [[TMP22:%.*]] = load i32, i32* [[LIN]], align 4
// CHECK26-NEXT: store i32 [[TMP22]], i32* [[DOTLINEAR_START24]], align 4
// CHECK26-NEXT: [[TMP23:%.*]] = load i32, i32* [[A]], align 4
// CHECK26-NEXT: store i32 [[TMP23]], i32* [[DOTLINEAR_START25]], align 4
// CHECK26-NEXT: [[CALL26:%.*]] = call i64 @_Z7get_valv()
// CHECK26-NEXT: store i64 [[CALL26]], i64* [[DOTLINEAR_STEP]], align 8
// CHECK26-NEXT: br label [[OMP_INNER_FOR_COND29:%.*]]
// CHECK26: omp.inner.for.cond29:
// CHECK26-NEXT: [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK26-NEXT: [[TMP25:%.*]] = load i64, i64* [[DOTOMP_UB22]], align 8, !llvm.access.group !9
// CHECK26-NEXT: [[CMP30:%.*]] = icmp ule i64 [[TMP24]], [[TMP25]]
// CHECK26-NEXT: br i1 [[CMP30]], label [[OMP_INNER_FOR_BODY31:%.*]], label [[OMP_INNER_FOR_END48:%.*]]
// CHECK26: omp.inner.for.body31:
// CHECK26-NEXT: [[TMP26:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK26-NEXT: [[MUL32:%.*]] = mul i64 [[TMP26]], 400
// CHECK26-NEXT: [[SUB33:%.*]] = sub i64 2000, [[MUL32]]
// CHECK26-NEXT: store i64 [[SUB33]], i64* [[IT]], align 8, !llvm.access.group !9
// CHECK26-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTLINEAR_START24]], align 4, !llvm.access.group !9
// CHECK26-NEXT: [[CONV34:%.*]] = sext i32 [[TMP27]] to i64
// CHECK26-NEXT: [[TMP28:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK26-NEXT: [[TMP29:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !9
// CHECK26-NEXT: [[MUL35:%.*]] = mul i64 [[TMP28]], [[TMP29]]
// CHECK26-NEXT: [[ADD36:%.*]] = add i64 [[CONV34]], [[MUL35]]
// CHECK26-NEXT: [[CONV37:%.*]] = trunc i64 [[ADD36]] to i32
// CHECK26-NEXT: store i32 [[CONV37]], i32* [[LIN27]], align 4, !llvm.access.group !9
// CHECK26-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTLINEAR_START25]], align 4, !llvm.access.group !9
// CHECK26-NEXT: [[CONV38:%.*]] = sext i32 [[TMP30]] to i64
// CHECK26-NEXT: [[TMP31:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK26-NEXT: [[TMP32:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !9
// CHECK26-NEXT: [[MUL39:%.*]] = mul i64 [[TMP31]], [[TMP32]]
// CHECK26-NEXT: [[ADD40:%.*]] = add i64 [[CONV38]], [[MUL39]]
// CHECK26-NEXT: [[CONV41:%.*]] = trunc i64 [[ADD40]] to i32
// CHECK26-NEXT: store i32 [[CONV41]], i32* [[A28]], align 4, !llvm.access.group !9
// CHECK26-NEXT: [[TMP33:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !9
// CHECK26-NEXT: [[CONV42:%.*]] = sext i16 [[TMP33]] to i32
// CHECK26-NEXT: [[ADD43:%.*]] = add nsw i32 [[CONV42]], 1
// CHECK26-NEXT: [[CONV44:%.*]] = trunc i32 [[ADD43]] to i16
// CHECK26-NEXT: store i16 [[CONV44]], i16* [[AA]], align 2, !llvm.access.group !9
// CHECK26-NEXT: br label [[OMP_BODY_CONTINUE45:%.*]]
// CHECK26: omp.body.continue45:
// CHECK26-NEXT: br label [[OMP_INNER_FOR_INC46:%.*]]
// CHECK26: omp.inner.for.inc46:
// CHECK26-NEXT: [[TMP34:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK26-NEXT: [[ADD47:%.*]] = add i64 [[TMP34]], 1
// CHECK26-NEXT: store i64 [[ADD47]], i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK26-NEXT: br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP10:![0-9]+]]
// CHECK26: omp.inner.for.end48:
// CHECK26-NEXT: store i64 400, i64* [[IT]], align 8
// CHECK26-NEXT: [[TMP35:%.*]] = load i32, i32* [[LIN27]], align 4
// CHECK26-NEXT: store i32 [[TMP35]], i32* [[LIN]], align 4
// CHECK26-NEXT: [[TMP36:%.*]] = load i32, i32* [[A28]], align 4
// CHECK26-NEXT: store i32 [[TMP36]], i32* [[A]], align 4
// CHECK26-NEXT: store i32 0, i32* [[DOTOMP_LB50]], align 4
// CHECK26-NEXT: store i32 3, i32* [[DOTOMP_UB51]], align 4
// CHECK26-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTOMP_LB50]], align 4
// CHECK26-NEXT: store i32 [[TMP37]], i32* [[DOTOMP_IV52]], align 4
// CHECK26-NEXT: br label [[OMP_INNER_FOR_COND54:%.*]]
// CHECK26: omp.inner.for.cond54:
// CHECK26-NEXT: [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
// CHECK26-NEXT: [[TMP39:%.*]] = load i32, i32* [[DOTOMP_UB51]], align 4, !llvm.access.group !12
// CHECK26-NEXT: [[CMP55:%.*]] = icmp sle i32 [[TMP38]], [[TMP39]]
// CHECK26-NEXT: br i1 [[CMP55]], label [[OMP_INNER_FOR_BODY56:%.*]], label [[OMP_INNER_FOR_END67:%.*]]
// CHECK26: omp.inner.for.body56:
// CHECK26-NEXT: [[TMP40:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
// CHECK26-NEXT: [[MUL57:%.*]] = mul nsw i32 [[TMP40]], 4
// CHECK26-NEXT: [[ADD58:%.*]] = add nsw i32 6, [[MUL57]]
// CHECK26-NEXT: [[CONV59:%.*]] = trunc i32 [[ADD58]] to i16
// CHECK26-NEXT: store i16 [[CONV59]], i16* [[IT53]], align 2, !llvm.access.group !12
// CHECK26-NEXT: [[TMP41:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !12
// CHECK26-NEXT: [[ADD60:%.*]] = add nsw i32 [[TMP41]], 1
// CHECK26-NEXT: store i32 [[ADD60]], i32* [[A]], align 4, !llvm.access.group !12
// CHECK26-NEXT: [[TMP42:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !12
// CHECK26-NEXT: [[CONV61:%.*]] = sext i16 [[TMP42]] to i32
// CHECK26-NEXT: [[ADD62:%.*]] = add nsw i32 [[CONV61]], 1
// CHECK26-NEXT: [[CONV63:%.*]] = trunc i32 [[ADD62]] to i16
// CHECK26-NEXT: store i16 [[CONV63]], i16* [[AA]], align 2, !llvm.access.group !12
// CHECK26-NEXT: br label [[OMP_BODY_CONTINUE64:%.*]]
// CHECK26: omp.body.continue64:
// CHECK26-NEXT: br label [[OMP_INNER_FOR_INC65:%.*]]
// CHECK26: omp.inner.for.inc65:
// CHECK26-NEXT: [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
// CHECK26-NEXT: [[ADD66:%.*]] = add nsw i32 [[TMP43]], 1
// CHECK26-NEXT: store i32 [[ADD66]], i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
// CHECK26-NEXT: br label [[OMP_INNER_FOR_COND54]], !llvm.loop [[LOOP13:![0-9]+]]
// CHECK26: omp.inner.for.end67:
// CHECK26-NEXT: store i16 22, i16* [[IT53]], align 2
// CHECK26-NEXT: [[TMP44:%.*]] = load i32, i32* [[A]], align 4
// CHECK26-NEXT: store i32 [[TMP44]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK26-NEXT: store i32 0, i32* [[DOTOMP_LB69]], align 4
// CHECK26-NEXT: store i32 25, i32* [[DOTOMP_UB70]], align 4
// CHECK26-NEXT: [[TMP45:%.*]] = load i32, i32* [[DOTOMP_LB69]], align 4
// CHECK26-NEXT: store i32 [[TMP45]], i32* [[DOTOMP_IV71]], align 4
// CHECK26-NEXT: br label [[OMP_INNER_FOR_COND73:%.*]]
// CHECK26: omp.inner.for.cond73:
// CHECK26-NEXT: [[TMP46:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15
// CHECK26-NEXT: [[TMP47:%.*]] = load i32, i32* [[DOTOMP_UB70]], align 4, !llvm.access.group !15
// CHECK26-NEXT: [[CMP74:%.*]] = icmp sle i32 [[TMP46]], [[TMP47]]
// CHECK26-NEXT: br i1 [[CMP74]], label [[OMP_INNER_FOR_BODY75:%.*]], label [[OMP_INNER_FOR_END100:%.*]]
// CHECK26: omp.inner.for.body75:
// CHECK26-NEXT: [[TMP48:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15
// CHECK26-NEXT: [[MUL76:%.*]] = mul nsw i32 [[TMP48]], 1
// CHECK26-NEXT: [[SUB77:%.*]] = sub nsw i32 122, [[MUL76]]
// CHECK26-NEXT: [[CONV78:%.*]] = trunc i32 [[SUB77]] to i8
// CHECK26-NEXT: store i8 [[CONV78]], i8* [[IT72]], align 1, !llvm.access.group !15
// CHECK26-NEXT: [[TMP49:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !15
// CHECK26-NEXT: [[ADD79:%.*]] = add nsw i32 [[TMP49]], 1
// CHECK26-NEXT: store i32 [[ADD79]], i32* [[A]], align 4, !llvm.access.group !15
// CHECK26-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i64 0, i64 2
// CHECK26-NEXT: [[TMP50:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !15
// CHECK26-NEXT: [[CONV80:%.*]] = fpext float [[TMP50]] to double
// CHECK26-NEXT: [[ADD81:%.*]] = fadd double [[CONV80]], 1.000000e+00
// CHECK26-NEXT: [[CONV82:%.*]] = fptrunc double [[ADD81]] to float
// CHECK26-NEXT: store float [[CONV82]], float* [[ARRAYIDX]], align 4, !llvm.access.group !15
// CHECK26-NEXT: [[ARRAYIDX83:%.*]] = getelementptr inbounds float, float* [[VLA]], i64 3
// CHECK26-NEXT: [[TMP51:%.*]] = load float, float* [[ARRAYIDX83]], align 4, !llvm.access.group !15
// CHECK26-NEXT: [[CONV84:%.*]] = fpext float [[TMP51]] to double
// CHECK26-NEXT: [[ADD85:%.*]] = fadd double [[CONV84]], 1.000000e+00
// CHECK26-NEXT: [[CONV86:%.*]] = fptrunc double [[ADD85]] to float
// CHECK26-NEXT: store float [[CONV86]], float* [[ARRAYIDX83]], align 4, !llvm.access.group !15
// CHECK26-NEXT: [[ARRAYIDX87:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i64 0, i64 1
// CHECK26-NEXT: [[ARRAYIDX88:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX87]], i64 0, i64 2
// CHECK26-NEXT: [[TMP52:%.*]] = load double, double* [[ARRAYIDX88]], align 8, !llvm.access.group !15
// CHECK26-NEXT: [[ADD89:%.*]] = fadd double [[TMP52]], 1.000000e+00
// CHECK26-NEXT: store double [[ADD89]], double* [[ARRAYIDX88]], align 8, !llvm.access.group !15
// CHECK26-NEXT: [[TMP53:%.*]] = mul nsw i64 1, [[TMP4]]
// CHECK26-NEXT: [[ARRAYIDX90:%.*]] = getelementptr inbounds double, double* [[VLA1]], i64 [[TMP53]]
// CHECK26-NEXT: [[ARRAYIDX91:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX90]], i64 3
// CHECK26-NEXT: [[TMP54:%.*]] = load double, double* [[ARRAYIDX91]], align 8, !llvm.access.group !15
// CHECK26-NEXT: [[ADD92:%.*]] = fadd double [[TMP54]], 1.000000e+00
// CHECK26-NEXT: store double [[ADD92]], double* [[ARRAYIDX91]], align 8, !llvm.access.group !15
// CHECK26-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
// CHECK26-NEXT: [[TMP55:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group !15
// CHECK26-NEXT: [[ADD93:%.*]] = add nsw i64 [[TMP55]], 1
// CHECK26-NEXT: store i64 [[ADD93]], i64* [[X]], align 8, !llvm.access.group !15
// CHECK26-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
// CHECK26-NEXT: [[TMP56:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group !15
// CHECK26-NEXT: [[CONV94:%.*]] = sext i8 [[TMP56]] to i32
// CHECK26-NEXT: [[ADD95:%.*]] = add nsw i32 [[CONV94]], 1
// CHECK26-NEXT: [[CONV96:%.*]] = trunc i32 [[ADD95]] to i8
// CHECK26-NEXT: store i8 [[CONV96]], i8* [[Y]], align 8, !llvm.access.group !15
// CHECK26-NEXT: br label [[OMP_BODY_CONTINUE97:%.*]]
// CHECK26: omp.body.continue97:
// CHECK26-NEXT: br label [[OMP_INNER_FOR_INC98:%.*]]
// CHECK26: omp.inner.for.inc98:
// CHECK26-NEXT: [[TMP57:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15
// CHECK26-NEXT: [[ADD99:%.*]] = add nsw i32 [[TMP57]], 1
// CHECK26-NEXT: store i32 [[ADD99]], i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15
// CHECK26-NEXT: br label [[OMP_INNER_FOR_COND73]], !llvm.loop [[LOOP16:![0-9]+]]
// CHECK26: omp.inner.for.end100:
// CHECK26-NEXT: store i8 96, i8* [[IT72]], align 1
// CHECK26-NEXT: [[TMP58:%.*]] = load i32, i32* [[A]], align 4
// CHECK26-NEXT: [[TMP59:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK26-NEXT: call void @llvm.stackrestore(i8* [[TMP59]])
// CHECK26-NEXT: ret i32 [[TMP58]]
//
//
// CHECK26-LABEL: define {{[^@]+}}@_Z3bari
// CHECK26-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] {
// CHECK26-NEXT: entry:
// CHECK26-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8
// CHECK26-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK26-NEXT: store i32 0, i32* [[A]], align 4
// CHECK26-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK26-NEXT: [[CALL:%.*]] = call signext i32 @_Z3fooi(i32 signext [[TMP0]])
// CHECK26-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK26-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
// CHECK26-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK26-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK26-NEXT: [[CALL1:%.*]] = call signext i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 8 dereferenceable(8) [[S]], i32 signext [[TMP2]])
// CHECK26-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK26-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
// CHECK26-NEXT: store i32 [[ADD2]], i32* [[A]], align 4
// CHECK26-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK26-NEXT: [[CALL3:%.*]] = call signext i32 @_ZL7fstatici(i32 signext [[TMP4]])
// CHECK26-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4
// CHECK26-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
// CHECK26-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK26-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK26-NEXT: [[CALL5:%.*]] = call signext i32 @_Z9ftemplateIiET_i(i32 signext [[TMP6]])
// CHECK26-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK26-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
// CHECK26-NEXT: store i32 [[ADD6]], i32* [[A]], align 4
// CHECK26-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK26-NEXT: ret i32 [[TMP8]]
//
//
// CHECK26-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
// CHECK26-SAME: (%struct.S1* nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 signext [[N:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK26-NEXT: entry:
// CHECK26-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK26-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[B:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK26-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[TMP:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK26-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK26-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK26-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK26-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK26-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK26-NEXT: store i32 [[ADD]], i32* [[B]], align 4
// CHECK26-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK26-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK26-NEXT: [[TMP3:%.*]] = call i8* @llvm.stacksave()
// CHECK26-NEXT: store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
// CHECK26-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]]
// CHECK26-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2
// CHECK26-NEXT: store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
// CHECK26-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK26-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK26-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK26-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8
// CHECK26-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK26: omp.inner.for.cond:
// CHECK26-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK26-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !18
// CHECK26-NEXT: [[CMP:%.*]] = icmp ule i64 [[TMP6]], [[TMP7]]
// CHECK26-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK26: omp.inner.for.body:
// CHECK26-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK26-NEXT: [[MUL:%.*]] = mul i64 [[TMP8]], 400
// CHECK26-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK26-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !18
// CHECK26-NEXT: [[TMP9:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group !18
// CHECK26-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP9]] to double
// CHECK26-NEXT: [[ADD2:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK26-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK26-NEXT: store double [[ADD2]], double* [[A]], align 8, !llvm.access.group !18
// CHECK26-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK26-NEXT: [[TMP10:%.*]] = load double, double* [[A3]], align 8, !llvm.access.group !18
// CHECK26-NEXT: [[INC:%.*]] = fadd double [[TMP10]], 1.000000e+00
// CHECK26-NEXT: store double [[INC]], double* [[A3]], align 8, !llvm.access.group !18
// CHECK26-NEXT: [[CONV4:%.*]] = fptosi double [[INC]] to i16
// CHECK26-NEXT: [[TMP11:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK26-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP11]]
// CHECK26-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
// CHECK26-NEXT: store i16 [[CONV4]], i16* [[ARRAYIDX5]], align 2, !llvm.access.group !18
// CHECK26-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK26: omp.body.continue:
// CHECK26-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK26: omp.inner.for.inc:
// CHECK26-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK26-NEXT: [[ADD6:%.*]] = add i64 [[TMP12]], 1
// CHECK26-NEXT: store i64 [[ADD6]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK26-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
// CHECK26: omp.inner.for.end:
// CHECK26-NEXT: store i64 400, i64* [[IT]], align 8
// CHECK26-NEXT: [[TMP13:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK26-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP13]]
// CHECK26-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX7]], i64 1
// CHECK26-NEXT: [[TMP14:%.*]] = load i16, i16* [[ARRAYIDX8]], align 2
// CHECK26-NEXT: [[CONV9:%.*]] = sext i16 [[TMP14]] to i32
// CHECK26-NEXT: [[TMP15:%.*]] = load i32, i32* [[B]], align 4
// CHECK26-NEXT: [[ADD10:%.*]] = add nsw i32 [[CONV9]], [[TMP15]]
// CHECK26-NEXT: [[TMP16:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK26-NEXT: call void @llvm.stackrestore(i8* [[TMP16]])
// CHECK26-NEXT: ret i32 [[ADD10]]
//
//
// CHECK26-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK26-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] {
// CHECK26-NEXT: entry:
// CHECK26-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK26-NEXT: [[AAA:%.*]] = alloca i8, align 1
// CHECK26-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK26-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK26-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK26-NEXT: store i32 0, i32* [[A]], align 4
// CHECK26-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK26-NEXT: store i8 0, i8* [[AAA]], align 1
// CHECK26-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK26-NEXT: store i32 429496720, i32* [[DOTOMP_UB]], align 4
// CHECK26-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK26-NEXT: ret i32 [[TMP0]]
//
//
// CHECK26-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK26-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK26-NEXT: entry:
// CHECK26-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK26-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK26-NEXT: [[TMP:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[I:%.*]] = alloca i64, align 8
// CHECK26-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK26-NEXT: store i32 0, i32* [[A]], align 4
// CHECK26-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK26-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK26-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8
// CHECK26-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK26-NEXT: store i64 [[TMP0]], i64* [[DOTOMP_IV]], align 8
// CHECK26-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK26: omp.inner.for.cond:
// CHECK26-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !21
// CHECK26-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !21
// CHECK26-NEXT: [[CMP:%.*]] = icmp sle i64 [[TMP1]], [[TMP2]]
// CHECK26-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK26: omp.inner.for.body:
// CHECK26-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !21
// CHECK26-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP3]], 3
// CHECK26-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
// CHECK26-NEXT: store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !21
// CHECK26-NEXT: [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !21
// CHECK26-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK26-NEXT: store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group !21
// CHECK26-NEXT: [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !21
// CHECK26-NEXT: [[CONV:%.*]] = sext i16 [[TMP5]] to i32
// CHECK26-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
// CHECK26-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
// CHECK26-NEXT: store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group !21
// CHECK26-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i64 0, i64 2
// CHECK26-NEXT: [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !21
// CHECK26-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK26-NEXT: store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !21
// CHECK26-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK26: omp.body.continue:
// CHECK26-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK26: omp.inner.for.inc:
// CHECK26-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !21
// CHECK26-NEXT: [[ADD5:%.*]] = add nsw i64 [[TMP7]], 1
// CHECK26-NEXT: store i64 [[ADD5]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !21
// CHECK26-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]]
// CHECK26: omp.inner.for.end:
// CHECK26-NEXT: store i64 11, i64* [[I]], align 8
// CHECK26-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK26-NEXT: ret i32 [[TMP8]]
//
//
// CHECK27-LABEL: define {{[^@]+}}@_Z7get_valv
// CHECK27-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK27-NEXT: entry:
// CHECK27-NEXT: ret i64 0
//
//
// CHECK27-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK27-SAME: (i32 [[N:%.*]]) #[[ATTR0]] {
// CHECK27-NEXT: entry:
// CHECK27-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK27-NEXT: [[B:%.*]] = alloca [10 x float], align 4
// CHECK27-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4
// CHECK27-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK27-NEXT: [[__VLA_EXPR1:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4
// CHECK27-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[K:%.*]] = alloca i64, align 8
// CHECK27-NEXT: [[_TMP3:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[DOTOMP_LB4:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[DOTOMP_UB5:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[DOTOMP_IV6:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[DOTLINEAR_START:%.*]] = alloca i64, align 8
// CHECK27-NEXT: [[I7:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[K8:%.*]] = alloca i64, align 8
// CHECK27-NEXT: [[LIN:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[_TMP20:%.*]] = alloca i64, align 4
// CHECK27-NEXT: [[DOTOMP_LB21:%.*]] = alloca i64, align 8
// CHECK27-NEXT: [[DOTOMP_UB22:%.*]] = alloca i64, align 8
// CHECK27-NEXT: [[DOTOMP_IV23:%.*]] = alloca i64, align 8
// CHECK27-NEXT: [[DOTLINEAR_START24:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[DOTLINEAR_START25:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
// CHECK27-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK27-NEXT: [[LIN27:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[A28:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[_TMP49:%.*]] = alloca i16, align 2
// CHECK27-NEXT: [[DOTOMP_LB50:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[DOTOMP_UB51:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[DOTOMP_IV52:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[IT53:%.*]] = alloca i16, align 2
// CHECK27-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[_TMP68:%.*]] = alloca i8, align 1
// CHECK27-NEXT: [[DOTOMP_LB69:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[DOTOMP_UB70:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[DOTOMP_IV71:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[IT72:%.*]] = alloca i8, align 1
// CHECK27-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK27-NEXT: store i32 0, i32* [[A]], align 4
// CHECK27-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK27-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK27-NEXT: [[TMP1:%.*]] = call i8* @llvm.stacksave()
// CHECK27-NEXT: store i8* [[TMP1]], i8** [[SAVED_STACK]], align 4
// CHECK27-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP0]], align 4
// CHECK27-NEXT: store i32 [[TMP0]], i32* [[__VLA_EXPR0]], align 4
// CHECK27-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK27-NEXT: [[TMP3:%.*]] = mul nuw i32 5, [[TMP2]]
// CHECK27-NEXT: [[VLA1:%.*]] = alloca double, i32 [[TMP3]], align 8
// CHECK27-NEXT: store i32 [[TMP2]], i32* [[__VLA_EXPR1]], align 4
// CHECK27-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK27-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4
// CHECK27-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK27-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK27-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK27: omp.inner.for.cond:
// CHECK27-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK27-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !3
// CHECK27-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK27-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK27: omp.inner.for.body:
// CHECK27-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK27-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
// CHECK27-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]]
// CHECK27-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !3
// CHECK27-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK27: omp.body.continue:
// CHECK27-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK27: omp.inner.for.inc:
// CHECK27-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK27-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK27-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK27-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
// CHECK27: omp.inner.for.end:
// CHECK27-NEXT: store i32 33, i32* [[I]], align 4
// CHECK27-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv()
// CHECK27-NEXT: store i64 [[CALL]], i64* [[K]], align 8
// CHECK27-NEXT: store i32 0, i32* [[DOTOMP_LB4]], align 4
// CHECK27-NEXT: store i32 8, i32* [[DOTOMP_UB5]], align 4
// CHECK27-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB4]], align 4
// CHECK27-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_IV6]], align 4
// CHECK27-NEXT: [[TMP10:%.*]] = load i64, i64* [[K]], align 8
// CHECK27-NEXT: store i64 [[TMP10]], i64* [[DOTLINEAR_START]], align 8
// CHECK27-NEXT: br label [[OMP_INNER_FOR_COND9:%.*]]
// CHECK27: omp.inner.for.cond9:
// CHECK27-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
// CHECK27-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB5]], align 4, !llvm.access.group !7
// CHECK27-NEXT: [[CMP10:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]]
// CHECK27-NEXT: br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END19:%.*]]
// CHECK27: omp.inner.for.body11:
// CHECK27-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
// CHECK27-NEXT: [[MUL12:%.*]] = mul nsw i32 [[TMP13]], 1
// CHECK27-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[MUL12]]
// CHECK27-NEXT: store i32 [[SUB]], i32* [[I7]], align 4, !llvm.access.group !7
// CHECK27-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !7
// CHECK27-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
// CHECK27-NEXT: [[MUL13:%.*]] = mul nsw i32 [[TMP15]], 3
// CHECK27-NEXT: [[CONV:%.*]] = sext i32 [[MUL13]] to i64
// CHECK27-NEXT: [[ADD14:%.*]] = add nsw i64 [[TMP14]], [[CONV]]
// CHECK27-NEXT: store i64 [[ADD14]], i64* [[K8]], align 8, !llvm.access.group !7
// CHECK27-NEXT: [[TMP16:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !7
// CHECK27-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP16]], 1
// CHECK27-NEXT: store i32 [[ADD15]], i32* [[A]], align 4, !llvm.access.group !7
// CHECK27-NEXT: br label [[OMP_BODY_CONTINUE16:%.*]]
// CHECK27: omp.body.continue16:
// CHECK27-NEXT: br label [[OMP_INNER_FOR_INC17:%.*]]
// CHECK27: omp.inner.for.inc17:
// CHECK27-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
// CHECK27-NEXT: [[ADD18:%.*]] = add nsw i32 [[TMP17]], 1
// CHECK27-NEXT: store i32 [[ADD18]], i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
// CHECK27-NEXT: br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP8:![0-9]+]]
// CHECK27: omp.inner.for.end19:
// CHECK27-NEXT: store i32 1, i32* [[I7]], align 4
// CHECK27-NEXT: [[TMP18:%.*]] = load i64, i64* [[K8]], align 8
// CHECK27-NEXT: store i64 [[TMP18]], i64* [[K]], align 8
// CHECK27-NEXT: store i32 12, i32* [[LIN]], align 4
// CHECK27-NEXT: store i64 0, i64* [[DOTOMP_LB21]], align 8
// CHECK27-NEXT: store i64 3, i64* [[DOTOMP_UB22]], align 8
// CHECK27-NEXT: [[TMP19:%.*]] = load i64, i64* [[DOTOMP_LB21]], align 8
// CHECK27-NEXT: store i64 [[TMP19]], i64* [[DOTOMP_IV23]], align 8
// CHECK27-NEXT: [[TMP20:%.*]] = load i32, i32* [[LIN]], align 4
// CHECK27-NEXT: store i32 [[TMP20]], i32* [[DOTLINEAR_START24]], align 4
// CHECK27-NEXT: [[TMP21:%.*]] = load i32, i32* [[A]], align 4
// CHECK27-NEXT: store i32 [[TMP21]], i32* [[DOTLINEAR_START25]], align 4
// CHECK27-NEXT: [[CALL26:%.*]] = call i64 @_Z7get_valv()
// CHECK27-NEXT: store i64 [[CALL26]], i64* [[DOTLINEAR_STEP]], align 8
// CHECK27-NEXT: br label [[OMP_INNER_FOR_COND29:%.*]]
// CHECK27: omp.inner.for.cond29:
// CHECK27-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
// CHECK27-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTOMP_UB22]], align 8, !llvm.access.group !10
// CHECK27-NEXT: [[CMP30:%.*]] = icmp ule i64 [[TMP22]], [[TMP23]]
// CHECK27-NEXT: br i1 [[CMP30]], label [[OMP_INNER_FOR_BODY31:%.*]], label [[OMP_INNER_FOR_END48:%.*]]
// CHECK27: omp.inner.for.body31:
// CHECK27-NEXT: [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
// CHECK27-NEXT: [[MUL32:%.*]] = mul i64 [[TMP24]], 400
// CHECK27-NEXT: [[SUB33:%.*]] = sub i64 2000, [[MUL32]]
// CHECK27-NEXT: store i64 [[SUB33]], i64* [[IT]], align 8, !llvm.access.group !10
// CHECK27-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTLINEAR_START24]], align 4, !llvm.access.group !10
// CHECK27-NEXT: [[CONV34:%.*]] = sext i32 [[TMP25]] to i64
// CHECK27-NEXT: [[TMP26:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
// CHECK27-NEXT: [[TMP27:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !10
// CHECK27-NEXT: [[MUL35:%.*]] = mul i64 [[TMP26]], [[TMP27]]
// CHECK27-NEXT: [[ADD36:%.*]] = add i64 [[CONV34]], [[MUL35]]
// CHECK27-NEXT: [[CONV37:%.*]] = trunc i64 [[ADD36]] to i32
// CHECK27-NEXT: store i32 [[CONV37]], i32* [[LIN27]], align 4, !llvm.access.group !10
// CHECK27-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTLINEAR_START25]], align 4, !llvm.access.group !10
// CHECK27-NEXT: [[CONV38:%.*]] = sext i32 [[TMP28]] to i64
// CHECK27-NEXT: [[TMP29:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
// CHECK27-NEXT: [[TMP30:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !10
// CHECK27-NEXT: [[MUL39:%.*]] = mul
i64 [[TMP29]], [[TMP30]] 25231 // CHECK27-NEXT: [[ADD40:%.*]] = add i64 [[CONV38]], [[MUL39]] 25232 // CHECK27-NEXT: [[CONV41:%.*]] = trunc i64 [[ADD40]] to i32 25233 // CHECK27-NEXT: store i32 [[CONV41]], i32* [[A28]], align 4, !llvm.access.group !10 25234 // CHECK27-NEXT: [[TMP31:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !10 25235 // CHECK27-NEXT: [[CONV42:%.*]] = sext i16 [[TMP31]] to i32 25236 // CHECK27-NEXT: [[ADD43:%.*]] = add nsw i32 [[CONV42]], 1 25237 // CHECK27-NEXT: [[CONV44:%.*]] = trunc i32 [[ADD43]] to i16 25238 // CHECK27-NEXT: store i16 [[CONV44]], i16* [[AA]], align 2, !llvm.access.group !10 25239 // CHECK27-NEXT: br label [[OMP_BODY_CONTINUE45:%.*]] 25240 // CHECK27: omp.body.continue45: 25241 // CHECK27-NEXT: br label [[OMP_INNER_FOR_INC46:%.*]] 25242 // CHECK27: omp.inner.for.inc46: 25243 // CHECK27-NEXT: [[TMP32:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10 25244 // CHECK27-NEXT: [[ADD47:%.*]] = add i64 [[TMP32]], 1 25245 // CHECK27-NEXT: store i64 [[ADD47]], i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10 25246 // CHECK27-NEXT: br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP11:![0-9]+]] 25247 // CHECK27: omp.inner.for.end48: 25248 // CHECK27-NEXT: store i64 400, i64* [[IT]], align 8 25249 // CHECK27-NEXT: [[TMP33:%.*]] = load i32, i32* [[LIN27]], align 4 25250 // CHECK27-NEXT: store i32 [[TMP33]], i32* [[LIN]], align 4 25251 // CHECK27-NEXT: [[TMP34:%.*]] = load i32, i32* [[A28]], align 4 25252 // CHECK27-NEXT: store i32 [[TMP34]], i32* [[A]], align 4 25253 // CHECK27-NEXT: store i32 0, i32* [[DOTOMP_LB50]], align 4 25254 // CHECK27-NEXT: store i32 3, i32* [[DOTOMP_UB51]], align 4 25255 // CHECK27-NEXT: [[TMP35:%.*]] = load i32, i32* [[DOTOMP_LB50]], align 4 25256 // CHECK27-NEXT: store i32 [[TMP35]], i32* [[DOTOMP_IV52]], align 4 25257 // CHECK27-NEXT: br label [[OMP_INNER_FOR_COND54:%.*]] 25258 // CHECK27: omp.inner.for.cond54: 25259 // CHECK27-NEXT: [[TMP36:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13 25260 // CHECK27-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTOMP_UB51]], align 4, !llvm.access.group !13 25261 // CHECK27-NEXT: [[CMP55:%.*]] = icmp sle i32 [[TMP36]], [[TMP37]] 25262 // CHECK27-NEXT: br i1 [[CMP55]], label [[OMP_INNER_FOR_BODY56:%.*]], label [[OMP_INNER_FOR_END67:%.*]] 25263 // CHECK27: omp.inner.for.body56: 25264 // CHECK27-NEXT: [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13 25265 // CHECK27-NEXT: [[MUL57:%.*]] = mul nsw i32 [[TMP38]], 4 25266 // CHECK27-NEXT: [[ADD58:%.*]] = add nsw i32 6, [[MUL57]] 25267 // CHECK27-NEXT: [[CONV59:%.*]] = trunc i32 [[ADD58]] to i16 25268 // CHECK27-NEXT: store i16 [[CONV59]], i16* [[IT53]], align 2, !llvm.access.group !13 25269 // CHECK27-NEXT: [[TMP39:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !13 25270 // CHECK27-NEXT: [[ADD60:%.*]] = add nsw i32 [[TMP39]], 1 25271 // CHECK27-NEXT: store i32 [[ADD60]], i32* [[A]], align 4, !llvm.access.group !13 25272 // CHECK27-NEXT: [[TMP40:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !13 25273 // CHECK27-NEXT: [[CONV61:%.*]] = sext i16 [[TMP40]] to i32 25274 // CHECK27-NEXT: [[ADD62:%.*]] = add nsw i32 [[CONV61]], 1 25275 // CHECK27-NEXT: [[CONV63:%.*]] = trunc i32 [[ADD62]] to i16 25276 // CHECK27-NEXT: store i16 [[CONV63]], i16* [[AA]], align 2, !llvm.access.group !13 25277 // CHECK27-NEXT: br label [[OMP_BODY_CONTINUE64:%.*]] 25278 // CHECK27: omp.body.continue64: 25279 // CHECK27-NEXT: br label [[OMP_INNER_FOR_INC65:%.*]] 25280 // 
CHECK27: omp.inner.for.inc65: 25281 // CHECK27-NEXT: [[TMP41:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13 25282 // CHECK27-NEXT: [[ADD66:%.*]] = add nsw i32 [[TMP41]], 1 25283 // CHECK27-NEXT: store i32 [[ADD66]], i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13 25284 // CHECK27-NEXT: br label [[OMP_INNER_FOR_COND54]], !llvm.loop [[LOOP14:![0-9]+]] 25285 // CHECK27: omp.inner.for.end67: 25286 // CHECK27-NEXT: store i16 22, i16* [[IT53]], align 2 25287 // CHECK27-NEXT: [[TMP42:%.*]] = load i32, i32* [[A]], align 4 25288 // CHECK27-NEXT: store i32 [[TMP42]], i32* [[DOTCAPTURE_EXPR_]], align 4 25289 // CHECK27-NEXT: store i32 0, i32* [[DOTOMP_LB69]], align 4 25290 // CHECK27-NEXT: store i32 25, i32* [[DOTOMP_UB70]], align 4 25291 // CHECK27-NEXT: [[TMP43:%.*]] = load i32, i32* [[DOTOMP_LB69]], align 4 25292 // CHECK27-NEXT: store i32 [[TMP43]], i32* [[DOTOMP_IV71]], align 4 25293 // CHECK27-NEXT: br label [[OMP_INNER_FOR_COND73:%.*]] 25294 // CHECK27: omp.inner.for.cond73: 25295 // CHECK27-NEXT: [[TMP44:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16 25296 // CHECK27-NEXT: [[TMP45:%.*]] = load i32, i32* [[DOTOMP_UB70]], align 4, !llvm.access.group !16 25297 // CHECK27-NEXT: [[CMP74:%.*]] = icmp sle i32 [[TMP44]], [[TMP45]] 25298 // CHECK27-NEXT: br i1 [[CMP74]], label [[OMP_INNER_FOR_BODY75:%.*]], label [[OMP_INNER_FOR_END100:%.*]] 25299 // CHECK27: omp.inner.for.body75: 25300 // CHECK27-NEXT: [[TMP46:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16 25301 // CHECK27-NEXT: [[MUL76:%.*]] = mul nsw i32 [[TMP46]], 1 25302 // CHECK27-NEXT: [[SUB77:%.*]] = sub nsw i32 122, [[MUL76]] 25303 // CHECK27-NEXT: [[CONV78:%.*]] = trunc i32 [[SUB77]] to i8 25304 // CHECK27-NEXT: store i8 [[CONV78]], i8* [[IT72]], align 1, !llvm.access.group !16 25305 // CHECK27-NEXT: [[TMP47:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !16 25306 // CHECK27-NEXT: [[ADD79:%.*]] = add nsw i32 [[TMP47]], 1 25307 // CHECK27-NEXT: store i32 [[ADD79]], i32* [[A]], align 4, !llvm.access.group !16 25308 // CHECK27-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i32 0, i32 2 25309 // CHECK27-NEXT: [[TMP48:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !16 25310 // CHECK27-NEXT: [[CONV80:%.*]] = fpext float [[TMP48]] to double 25311 // CHECK27-NEXT: [[ADD81:%.*]] = fadd double [[CONV80]], 1.000000e+00 25312 // CHECK27-NEXT: [[CONV82:%.*]] = fptrunc double [[ADD81]] to float 25313 // CHECK27-NEXT: store float [[CONV82]], float* [[ARRAYIDX]], align 4, !llvm.access.group !16 25314 // CHECK27-NEXT: [[ARRAYIDX83:%.*]] = getelementptr inbounds float, float* [[VLA]], i32 3 25315 // CHECK27-NEXT: [[TMP49:%.*]] = load float, float* [[ARRAYIDX83]], align 4, !llvm.access.group !16 25316 // CHECK27-NEXT: [[CONV84:%.*]] = fpext float [[TMP49]] to double 25317 // CHECK27-NEXT: [[ADD85:%.*]] = fadd double [[CONV84]], 1.000000e+00 25318 // CHECK27-NEXT: [[CONV86:%.*]] = fptrunc double [[ADD85]] to float 25319 // CHECK27-NEXT: store float [[CONV86]], float* [[ARRAYIDX83]], align 4, !llvm.access.group !16 25320 // CHECK27-NEXT: [[ARRAYIDX87:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i32 0, i32 1 25321 // CHECK27-NEXT: [[ARRAYIDX88:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX87]], i32 0, i32 2 25322 // CHECK27-NEXT: [[TMP50:%.*]] = load double, double* [[ARRAYIDX88]], align 8, !llvm.access.group !16 25323 // CHECK27-NEXT: [[ADD89:%.*]] = fadd 
double [[TMP50]], 1.000000e+00 25324 // CHECK27-NEXT: store double [[ADD89]], double* [[ARRAYIDX88]], align 8, !llvm.access.group !16 25325 // CHECK27-NEXT: [[TMP51:%.*]] = mul nsw i32 1, [[TMP2]] 25326 // CHECK27-NEXT: [[ARRAYIDX90:%.*]] = getelementptr inbounds double, double* [[VLA1]], i32 [[TMP51]] 25327 // CHECK27-NEXT: [[ARRAYIDX91:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX90]], i32 3 25328 // CHECK27-NEXT: [[TMP52:%.*]] = load double, double* [[ARRAYIDX91]], align 8, !llvm.access.group !16 25329 // CHECK27-NEXT: [[ADD92:%.*]] = fadd double [[TMP52]], 1.000000e+00 25330 // CHECK27-NEXT: store double [[ADD92]], double* [[ARRAYIDX91]], align 8, !llvm.access.group !16 25331 // CHECK27-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0 25332 // CHECK27-NEXT: [[TMP53:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group !16 25333 // CHECK27-NEXT: [[ADD93:%.*]] = add nsw i64 [[TMP53]], 1 25334 // CHECK27-NEXT: store i64 [[ADD93]], i64* [[X]], align 4, !llvm.access.group !16 25335 // CHECK27-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1 25336 // CHECK27-NEXT: [[TMP54:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group !16 25337 // CHECK27-NEXT: [[CONV94:%.*]] = sext i8 [[TMP54]] to i32 25338 // CHECK27-NEXT: [[ADD95:%.*]] = add nsw i32 [[CONV94]], 1 25339 // CHECK27-NEXT: [[CONV96:%.*]] = trunc i32 [[ADD95]] to i8 25340 // CHECK27-NEXT: store i8 [[CONV96]], i8* [[Y]], align 4, !llvm.access.group !16 25341 // CHECK27-NEXT: br label [[OMP_BODY_CONTINUE97:%.*]] 25342 // CHECK27: omp.body.continue97: 25343 // CHECK27-NEXT: br label [[OMP_INNER_FOR_INC98:%.*]] 25344 // CHECK27: omp.inner.for.inc98: 25345 // CHECK27-NEXT: [[TMP55:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16 25346 // CHECK27-NEXT: [[ADD99:%.*]] = add nsw i32 [[TMP55]], 1 25347 // CHECK27-NEXT: store i32 [[ADD99]], i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16 25348 // CHECK27-NEXT: br label [[OMP_INNER_FOR_COND73]], !llvm.loop [[LOOP17:![0-9]+]] 25349 // CHECK27: omp.inner.for.end100: 25350 // CHECK27-NEXT: store i8 96, i8* [[IT72]], align 1 25351 // CHECK27-NEXT: [[TMP56:%.*]] = load i32, i32* [[A]], align 4 25352 // CHECK27-NEXT: [[TMP57:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 25353 // CHECK27-NEXT: call void @llvm.stackrestore(i8* [[TMP57]]) 25354 // CHECK27-NEXT: ret i32 [[TMP56]] 25355 // 25356 // 25357 // CHECK27-LABEL: define {{[^@]+}}@_Z3bari 25358 // CHECK27-SAME: (i32 [[N:%.*]]) #[[ATTR0]] { 25359 // CHECK27-NEXT: entry: 25360 // CHECK27-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 25361 // CHECK27-NEXT: [[A:%.*]] = alloca i32, align 4 25362 // CHECK27-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4 25363 // CHECK27-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 25364 // CHECK27-NEXT: store i32 0, i32* [[A]], align 4 25365 // CHECK27-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 25366 // CHECK27-NEXT: [[CALL:%.*]] = call i32 @_Z3fooi(i32 [[TMP0]]) 25367 // CHECK27-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4 25368 // CHECK27-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] 25369 // CHECK27-NEXT: store i32 [[ADD]], i32* [[A]], align 4 25370 // CHECK27-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 25371 // CHECK27-NEXT: [[CALL1:%.*]] = call i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 4 dereferenceable(8) [[S]], i32 [[TMP2]]) 25372 // CHECK27-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 25373 // CHECK27-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], 
[[CALL1]] 25374 // CHECK27-NEXT: store i32 [[ADD2]], i32* [[A]], align 4 25375 // CHECK27-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 25376 // CHECK27-NEXT: [[CALL3:%.*]] = call i32 @_ZL7fstatici(i32 [[TMP4]]) 25377 // CHECK27-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4 25378 // CHECK27-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] 25379 // CHECK27-NEXT: store i32 [[ADD4]], i32* [[A]], align 4 25380 // CHECK27-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4 25381 // CHECK27-NEXT: [[CALL5:%.*]] = call i32 @_Z9ftemplateIiET_i(i32 [[TMP6]]) 25382 // CHECK27-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4 25383 // CHECK27-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]] 25384 // CHECK27-NEXT: store i32 [[ADD6]], i32* [[A]], align 4 25385 // CHECK27-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4 25386 // CHECK27-NEXT: ret i32 [[TMP8]] 25387 // 25388 // 25389 // CHECK27-LABEL: define {{[^@]+}}@_ZN2S12r1Ei 25390 // CHECK27-SAME: (%struct.S1* nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 [[N:%.*]]) #[[ATTR0]] comdat align 2 { 25391 // CHECK27-NEXT: entry: 25392 // CHECK27-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 25393 // CHECK27-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 25394 // CHECK27-NEXT: [[B:%.*]] = alloca i32, align 4 25395 // CHECK27-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4 25396 // CHECK27-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4 25397 // CHECK27-NEXT: [[TMP:%.*]] = alloca i64, align 4 25398 // CHECK27-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 25399 // CHECK27-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 25400 // CHECK27-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 25401 // CHECK27-NEXT: [[IT:%.*]] = alloca i64, align 8 25402 // CHECK27-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 25403 // CHECK27-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 25404 // CHECK27-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 25405 // CHECK27-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 25406 // CHECK27-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 25407 // CHECK27-NEXT: store i32 [[ADD]], i32* [[B]], align 4 25408 // CHECK27-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 25409 // CHECK27-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave() 25410 // CHECK27-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4 25411 // CHECK27-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]] 25412 // CHECK27-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2 25413 // CHECK27-NEXT: store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4 25414 // CHECK27-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 25415 // CHECK27-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8 25416 // CHECK27-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 25417 // CHECK27-NEXT: store i64 [[TMP4]], i64* [[DOTOMP_IV]], align 8 25418 // CHECK27-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 25419 // CHECK27: omp.inner.for.cond: 25420 // CHECK27-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19 25421 // CHECK27-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !19 25422 // CHECK27-NEXT: [[CMP:%.*]] = icmp ule i64 [[TMP5]], [[TMP6]] 25423 // CHECK27-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 25424 // CHECK27: omp.inner.for.body: 25425 // CHECK27-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19 25426 // CHECK27-NEXT: [[MUL:%.*]] = mul i64 [[TMP7]], 400 25427 // 
CHECK27-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] 25428 // CHECK27-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !19 25429 // CHECK27-NEXT: [[TMP8:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group !19 25430 // CHECK27-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP8]] to double 25431 // CHECK27-NEXT: [[ADD2:%.*]] = fadd double [[CONV]], 1.500000e+00 25432 // CHECK27-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0 25433 // CHECK27-NEXT: store double [[ADD2]], double* [[A]], align 4, !llvm.access.group !19 25434 // CHECK27-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 25435 // CHECK27-NEXT: [[TMP9:%.*]] = load double, double* [[A3]], align 4, !llvm.access.group !19 25436 // CHECK27-NEXT: [[INC:%.*]] = fadd double [[TMP9]], 1.000000e+00 25437 // CHECK27-NEXT: store double [[INC]], double* [[A3]], align 4, !llvm.access.group !19 25438 // CHECK27-NEXT: [[CONV4:%.*]] = fptosi double [[INC]] to i16 25439 // CHECK27-NEXT: [[TMP10:%.*]] = mul nsw i32 1, [[TMP1]] 25440 // CHECK27-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP10]] 25441 // CHECK27-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1 25442 // CHECK27-NEXT: store i16 [[CONV4]], i16* [[ARRAYIDX5]], align 2, !llvm.access.group !19 25443 // CHECK27-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 25444 // CHECK27: omp.body.continue: 25445 // CHECK27-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 25446 // CHECK27: omp.inner.for.inc: 25447 // CHECK27-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19 25448 // CHECK27-NEXT: [[ADD6:%.*]] = add i64 [[TMP11]], 1 25449 // CHECK27-NEXT: store i64 [[ADD6]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19 25450 // CHECK27-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] 25451 // CHECK27: omp.inner.for.end: 25452 // CHECK27-NEXT: store i64 400, i64* [[IT]], align 8 25453 // CHECK27-NEXT: [[TMP12:%.*]] = mul nsw i32 1, [[TMP1]] 25454 // CHECK27-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP12]] 25455 // CHECK27-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX7]], i32 1 25456 // CHECK27-NEXT: [[TMP13:%.*]] = load i16, i16* [[ARRAYIDX8]], align 2 25457 // CHECK27-NEXT: [[CONV9:%.*]] = sext i16 [[TMP13]] to i32 25458 // CHECK27-NEXT: [[TMP14:%.*]] = load i32, i32* [[B]], align 4 25459 // CHECK27-NEXT: [[ADD10:%.*]] = add nsw i32 [[CONV9]], [[TMP14]] 25460 // CHECK27-NEXT: [[TMP15:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 25461 // CHECK27-NEXT: call void @llvm.stackrestore(i8* [[TMP15]]) 25462 // CHECK27-NEXT: ret i32 [[ADD10]] 25463 // 25464 // 25465 // CHECK27-LABEL: define {{[^@]+}}@_ZL7fstatici 25466 // CHECK27-SAME: (i32 [[N:%.*]]) #[[ATTR0]] { 25467 // CHECK27-NEXT: entry: 25468 // CHECK27-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 25469 // CHECK27-NEXT: [[A:%.*]] = alloca i32, align 4 25470 // CHECK27-NEXT: [[AA:%.*]] = alloca i16, align 2 25471 // CHECK27-NEXT: [[AAA:%.*]] = alloca i8, align 1 25472 // CHECK27-NEXT: [[B:%.*]] = alloca [10 x i32], align 4 25473 // CHECK27-NEXT: [[TMP:%.*]] = alloca i32, align 4 25474 // CHECK27-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 25475 // CHECK27-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 25476 // CHECK27-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 25477 // CHECK27-NEXT: store i32 0, i32* [[A]], align 4 25478 // CHECK27-NEXT: store i16 0, i16* [[AA]], align 2 25479 // CHECK27-NEXT: 
store i8 0, i8* [[AAA]], align 1 25480 // CHECK27-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 25481 // CHECK27-NEXT: store i32 429496720, i32* [[DOTOMP_UB]], align 4 25482 // CHECK27-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4 25483 // CHECK27-NEXT: ret i32 [[TMP0]] 25484 // 25485 // 25486 // CHECK27-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i 25487 // CHECK27-SAME: (i32 [[N:%.*]]) #[[ATTR0]] comdat { 25488 // CHECK27-NEXT: entry: 25489 // CHECK27-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 25490 // CHECK27-NEXT: [[A:%.*]] = alloca i32, align 4 25491 // CHECK27-NEXT: [[AA:%.*]] = alloca i16, align 2 25492 // CHECK27-NEXT: [[B:%.*]] = alloca [10 x i32], align 4 25493 // CHECK27-NEXT: [[TMP:%.*]] = alloca i64, align 4 25494 // CHECK27-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 25495 // CHECK27-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 25496 // CHECK27-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 25497 // CHECK27-NEXT: [[I:%.*]] = alloca i64, align 8 25498 // CHECK27-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 25499 // CHECK27-NEXT: store i32 0, i32* [[A]], align 4 25500 // CHECK27-NEXT: store i16 0, i16* [[AA]], align 2 25501 // CHECK27-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 25502 // CHECK27-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8 25503 // CHECK27-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 25504 // CHECK27-NEXT: store i64 [[TMP0]], i64* [[DOTOMP_IV]], align 8 25505 // CHECK27-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 25506 // CHECK27: omp.inner.for.cond: 25507 // CHECK27-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !22 25508 // CHECK27-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !22 25509 // CHECK27-NEXT: [[CMP:%.*]] = icmp sle i64 [[TMP1]], [[TMP2]] 25510 // CHECK27-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 25511 // CHECK27: omp.inner.for.body: 25512 // CHECK27-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !22 25513 // CHECK27-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP3]], 3 25514 // CHECK27-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]] 25515 // CHECK27-NEXT: store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !22 25516 // CHECK27-NEXT: [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !22 25517 // CHECK27-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 25518 // CHECK27-NEXT: store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group !22 25519 // CHECK27-NEXT: [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !22 25520 // CHECK27-NEXT: [[CONV:%.*]] = sext i16 [[TMP5]] to i32 25521 // CHECK27-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1 25522 // CHECK27-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16 25523 // CHECK27-NEXT: store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group !22 25524 // CHECK27-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i32 0, i32 2 25525 // CHECK27-NEXT: [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !22 25526 // CHECK27-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1 25527 // CHECK27-NEXT: store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !22 25528 // CHECK27-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 25529 // CHECK27: omp.body.continue: 25530 // CHECK27-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 25531 // CHECK27: omp.inner.for.inc: 25532 // CHECK27-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !22 25533 // CHECK27-NEXT: [[ADD5:%.*]] = 
add nsw i64 [[TMP7]], 1 25534 // CHECK27-NEXT: store i64 [[ADD5]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !22 25535 // CHECK27-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]] 25536 // CHECK27: omp.inner.for.end: 25537 // CHECK27-NEXT: store i64 11, i64* [[I]], align 8 25538 // CHECK27-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4 25539 // CHECK27-NEXT: ret i32 [[TMP8]] 25540 // 25541 // 25542 // CHECK28-LABEL: define {{[^@]+}}@_Z7get_valv 25543 // CHECK28-SAME: () #[[ATTR0:[0-9]+]] { 25544 // CHECK28-NEXT: entry: 25545 // CHECK28-NEXT: ret i64 0 25546 // 25547 // 25548 // CHECK28-LABEL: define {{[^@]+}}@_Z3fooi 25549 // CHECK28-SAME: (i32 [[N:%.*]]) #[[ATTR0]] { 25550 // CHECK28-NEXT: entry: 25551 // CHECK28-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 25552 // CHECK28-NEXT: [[A:%.*]] = alloca i32, align 4 25553 // CHECK28-NEXT: [[AA:%.*]] = alloca i16, align 2 25554 // CHECK28-NEXT: [[B:%.*]] = alloca [10 x float], align 4 25555 // CHECK28-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4 25556 // CHECK28-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4 25557 // CHECK28-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8 25558 // CHECK28-NEXT: [[__VLA_EXPR1:%.*]] = alloca i32, align 4 25559 // CHECK28-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4 25560 // CHECK28-NEXT: [[TMP:%.*]] = alloca i32, align 4 25561 // CHECK28-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 25562 // CHECK28-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 25563 // CHECK28-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 25564 // CHECK28-NEXT: [[I:%.*]] = alloca i32, align 4 25565 // CHECK28-NEXT: [[K:%.*]] = alloca i64, align 8 25566 // CHECK28-NEXT: [[_TMP3:%.*]] = alloca i32, align 4 25567 // CHECK28-NEXT: [[DOTOMP_LB4:%.*]] = alloca i32, align 4 25568 // CHECK28-NEXT: [[DOTOMP_UB5:%.*]] = alloca i32, align 4 25569 // CHECK28-NEXT: [[DOTOMP_IV6:%.*]] = alloca i32, align 4 25570 // CHECK28-NEXT: [[DOTLINEAR_START:%.*]] = alloca i64, align 8 25571 // CHECK28-NEXT: [[I7:%.*]] = alloca i32, align 4 25572 // CHECK28-NEXT: [[K8:%.*]] = alloca i64, align 8 25573 // CHECK28-NEXT: [[LIN:%.*]] = alloca i32, align 4 25574 // CHECK28-NEXT: [[_TMP20:%.*]] = alloca i64, align 4 25575 // CHECK28-NEXT: [[DOTOMP_LB21:%.*]] = alloca i64, align 8 25576 // CHECK28-NEXT: [[DOTOMP_UB22:%.*]] = alloca i64, align 8 25577 // CHECK28-NEXT: [[DOTOMP_IV23:%.*]] = alloca i64, align 8 25578 // CHECK28-NEXT: [[DOTLINEAR_START24:%.*]] = alloca i32, align 4 25579 // CHECK28-NEXT: [[DOTLINEAR_START25:%.*]] = alloca i32, align 4 25580 // CHECK28-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8 25581 // CHECK28-NEXT: [[IT:%.*]] = alloca i64, align 8 25582 // CHECK28-NEXT: [[LIN27:%.*]] = alloca i32, align 4 25583 // CHECK28-NEXT: [[A28:%.*]] = alloca i32, align 4 25584 // CHECK28-NEXT: [[_TMP49:%.*]] = alloca i16, align 2 25585 // CHECK28-NEXT: [[DOTOMP_LB50:%.*]] = alloca i32, align 4 25586 // CHECK28-NEXT: [[DOTOMP_UB51:%.*]] = alloca i32, align 4 25587 // CHECK28-NEXT: [[DOTOMP_IV52:%.*]] = alloca i32, align 4 25588 // CHECK28-NEXT: [[IT53:%.*]] = alloca i16, align 2 25589 // CHECK28-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 25590 // CHECK28-NEXT: [[_TMP68:%.*]] = alloca i8, align 1 25591 // CHECK28-NEXT: [[DOTOMP_LB69:%.*]] = alloca i32, align 4 25592 // CHECK28-NEXT: [[DOTOMP_UB70:%.*]] = alloca i32, align 4 25593 // CHECK28-NEXT: [[DOTOMP_IV71:%.*]] = alloca i32, align 4 25594 // CHECK28-NEXT: [[IT72:%.*]] = alloca i8, align 1 25595 // CHECK28-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 25596 
// CHECK28-NEXT: store i32 0, i32* [[A]], align 4
// CHECK28-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK28-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK28-NEXT: [[TMP1:%.*]] = call i8* @llvm.stacksave()
// CHECK28-NEXT: store i8* [[TMP1]], i8** [[SAVED_STACK]], align 4
// CHECK28-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP0]], align 4
// CHECK28-NEXT: store i32 [[TMP0]], i32* [[__VLA_EXPR0]], align 4
// CHECK28-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK28-NEXT: [[TMP3:%.*]] = mul nuw i32 5, [[TMP2]]
// CHECK28-NEXT: [[VLA1:%.*]] = alloca double, i32 [[TMP3]], align 8
// CHECK28-NEXT: store i32 [[TMP2]], i32* [[__VLA_EXPR1]], align 4
// CHECK28-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK28-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4
// CHECK28-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK28-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK28-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK28: omp.inner.for.cond:
// CHECK28-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK28-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !3
// CHECK28-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK28-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK28: omp.inner.for.body:
// CHECK28-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK28-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
// CHECK28-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]]
// CHECK28-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !3
// CHECK28-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK28: omp.body.continue:
// CHECK28-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK28: omp.inner.for.inc:
// CHECK28-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK28-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK28-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK28-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
// CHECK28: omp.inner.for.end:
// CHECK28-NEXT: store i32 33, i32* [[I]], align 4
// CHECK28-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv()
// CHECK28-NEXT: store i64 [[CALL]], i64* [[K]], align 8
// CHECK28-NEXT: store i32 0, i32* [[DOTOMP_LB4]], align 4
// CHECK28-NEXT: store i32 8, i32* [[DOTOMP_UB5]], align 4
// CHECK28-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB4]], align 4
// CHECK28-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_IV6]], align 4
// CHECK28-NEXT: [[TMP10:%.*]] = load i64, i64* [[K]], align 8
// CHECK28-NEXT: store i64 [[TMP10]], i64* [[DOTLINEAR_START]], align 8
// CHECK28-NEXT: br label [[OMP_INNER_FOR_COND9:%.*]]
// CHECK28: omp.inner.for.cond9:
// CHECK28-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
// CHECK28-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB5]], align 4, !llvm.access.group !7
// CHECK28-NEXT: [[CMP10:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]]
// CHECK28-NEXT: br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END19:%.*]]
// CHECK28: omp.inner.for.body11:
// CHECK28-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
// CHECK28-NEXT: [[MUL12:%.*]] = mul nsw i32 [[TMP13]], 1
// CHECK28-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[MUL12]]
// CHECK28-NEXT: store i32 [[SUB]], i32* [[I7]], align 4, !llvm.access.group !7
// CHECK28-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !7
// CHECK28-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
// CHECK28-NEXT: [[MUL13:%.*]] = mul nsw i32 [[TMP15]], 3
// CHECK28-NEXT: [[CONV:%.*]] = sext i32 [[MUL13]] to i64
// CHECK28-NEXT: [[ADD14:%.*]] = add nsw i64 [[TMP14]], [[CONV]]
// CHECK28-NEXT: store i64 [[ADD14]], i64* [[K8]], align 8, !llvm.access.group !7
// CHECK28-NEXT: [[TMP16:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !7
// CHECK28-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP16]], 1
// CHECK28-NEXT: store i32 [[ADD15]], i32* [[A]], align 4, !llvm.access.group !7
// CHECK28-NEXT: br label [[OMP_BODY_CONTINUE16:%.*]]
// CHECK28: omp.body.continue16:
// CHECK28-NEXT: br label [[OMP_INNER_FOR_INC17:%.*]]
// CHECK28: omp.inner.for.inc17:
// CHECK28-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
// CHECK28-NEXT: [[ADD18:%.*]] = add nsw i32 [[TMP17]], 1
// CHECK28-NEXT: store i32 [[ADD18]], i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
// CHECK28-NEXT: br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP8:![0-9]+]]
// CHECK28: omp.inner.for.end19:
// CHECK28-NEXT: store i32 1, i32* [[I7]], align 4
// CHECK28-NEXT: [[TMP18:%.*]] = load i64, i64* [[K8]], align 8
// CHECK28-NEXT: store i64 [[TMP18]], i64* [[K]], align 8
// CHECK28-NEXT: store i32 12, i32* [[LIN]], align 4
// CHECK28-NEXT: store i64 0, i64* [[DOTOMP_LB21]], align 8
// CHECK28-NEXT: store i64 3, i64* [[DOTOMP_UB22]], align 8
// CHECK28-NEXT: [[TMP19:%.*]] = load i64, i64* [[DOTOMP_LB21]], align 8
// CHECK28-NEXT: store i64 [[TMP19]], i64* [[DOTOMP_IV23]], align 8
// CHECK28-NEXT: [[TMP20:%.*]] = load i32, i32* [[LIN]], align 4
// CHECK28-NEXT: store i32 [[TMP20]], i32* [[DOTLINEAR_START24]], align 4
// CHECK28-NEXT: [[TMP21:%.*]] = load i32, i32* [[A]], align 4
// CHECK28-NEXT: store i32 [[TMP21]], i32* [[DOTLINEAR_START25]], align 4
// CHECK28-NEXT: [[CALL26:%.*]] = call i64 @_Z7get_valv()
// CHECK28-NEXT: store i64 [[CALL26]], i64* [[DOTLINEAR_STEP]], align 8
// CHECK28-NEXT: br label [[OMP_INNER_FOR_COND29:%.*]]
// CHECK28: omp.inner.for.cond29:
// CHECK28-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
// CHECK28-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTOMP_UB22]], align 8, !llvm.access.group !10
// CHECK28-NEXT: [[CMP30:%.*]] = icmp ule i64 [[TMP22]], [[TMP23]]
// CHECK28-NEXT: br i1 [[CMP30]], label [[OMP_INNER_FOR_BODY31:%.*]], label [[OMP_INNER_FOR_END48:%.*]]
// CHECK28: omp.inner.for.body31:
// CHECK28-NEXT: [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
// CHECK28-NEXT: [[MUL32:%.*]] = mul i64 [[TMP24]], 400
// CHECK28-NEXT: [[SUB33:%.*]] = sub i64 2000, [[MUL32]]
// CHECK28-NEXT: store i64 [[SUB33]], i64* [[IT]], align 8, !llvm.access.group !10
// CHECK28-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTLINEAR_START24]], align 4, !llvm.access.group !10
// CHECK28-NEXT: [[CONV34:%.*]] = sext i32 [[TMP25]] to i64
// CHECK28-NEXT: [[TMP26:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
// CHECK28-NEXT: [[TMP27:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !10
// CHECK28-NEXT: [[MUL35:%.*]] = mul i64 [[TMP26]], [[TMP27]]
// CHECK28-NEXT: [[ADD36:%.*]] = add i64 [[CONV34]], [[MUL35]]
// CHECK28-NEXT: [[CONV37:%.*]] = trunc i64 [[ADD36]] to i32
// CHECK28-NEXT: store i32 [[CONV37]], i32* [[LIN27]], align 4, !llvm.access.group !10
// CHECK28-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTLINEAR_START25]], align 4, !llvm.access.group !10
// CHECK28-NEXT: [[CONV38:%.*]] = sext i32 [[TMP28]] to i64
// CHECK28-NEXT: [[TMP29:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
// CHECK28-NEXT: [[TMP30:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !10
// CHECK28-NEXT: [[MUL39:%.*]] = mul i64 [[TMP29]], [[TMP30]]
// CHECK28-NEXT: [[ADD40:%.*]] = add i64 [[CONV38]], [[MUL39]]
// CHECK28-NEXT: [[CONV41:%.*]] = trunc i64 [[ADD40]] to i32
// CHECK28-NEXT: store i32 [[CONV41]], i32* [[A28]], align 4, !llvm.access.group !10
// CHECK28-NEXT: [[TMP31:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !10
// CHECK28-NEXT: [[CONV42:%.*]] = sext i16 [[TMP31]] to i32
// CHECK28-NEXT: [[ADD43:%.*]] = add nsw i32 [[CONV42]], 1
// CHECK28-NEXT: [[CONV44:%.*]] = trunc i32 [[ADD43]] to i16
// CHECK28-NEXT: store i16 [[CONV44]], i16* [[AA]], align 2, !llvm.access.group !10
// CHECK28-NEXT: br label [[OMP_BODY_CONTINUE45:%.*]]
// CHECK28: omp.body.continue45:
// CHECK28-NEXT: br label [[OMP_INNER_FOR_INC46:%.*]]
// CHECK28: omp.inner.for.inc46:
// CHECK28-NEXT: [[TMP32:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
// CHECK28-NEXT: [[ADD47:%.*]] = add i64 [[TMP32]], 1
// CHECK28-NEXT: store i64 [[ADD47]], i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
// CHECK28-NEXT: br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP11:![0-9]+]]
// CHECK28: omp.inner.for.end48:
// CHECK28-NEXT: store i64 400, i64* [[IT]], align 8
// CHECK28-NEXT: [[TMP33:%.*]] = load i32, i32* [[LIN27]], align 4
// CHECK28-NEXT: store i32 [[TMP33]], i32* [[LIN]], align 4
// CHECK28-NEXT: [[TMP34:%.*]] = load i32, i32* [[A28]], align 4
// CHECK28-NEXT: store i32 [[TMP34]], i32* [[A]], align 4
// CHECK28-NEXT: store i32 0, i32* [[DOTOMP_LB50]], align 4
// CHECK28-NEXT: store i32 3, i32* [[DOTOMP_UB51]], align 4
// CHECK28-NEXT: [[TMP35:%.*]] = load i32, i32* [[DOTOMP_LB50]], align 4
// CHECK28-NEXT: store i32 [[TMP35]], i32* [[DOTOMP_IV52]], align 4
// CHECK28-NEXT: br label [[OMP_INNER_FOR_COND54:%.*]]
// CHECK28: omp.inner.for.cond54:
// CHECK28-NEXT: [[TMP36:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13
// CHECK28-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTOMP_UB51]], align 4, !llvm.access.group !13
// CHECK28-NEXT: [[CMP55:%.*]] = icmp sle i32 [[TMP36]], [[TMP37]]
// CHECK28-NEXT: br i1 [[CMP55]], label [[OMP_INNER_FOR_BODY56:%.*]], label [[OMP_INNER_FOR_END67:%.*]]
// CHECK28: omp.inner.for.body56:
// CHECK28-NEXT: [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13
// CHECK28-NEXT: [[MUL57:%.*]] = mul nsw i32 [[TMP38]], 4
// CHECK28-NEXT: [[ADD58:%.*]] = add nsw i32 6, [[MUL57]]
// CHECK28-NEXT: [[CONV59:%.*]] = trunc i32 [[ADD58]] to i16
// CHECK28-NEXT: store i16 [[CONV59]], i16* [[IT53]], align 2, !llvm.access.group !13
// CHECK28-NEXT: [[TMP39:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !13
// CHECK28-NEXT: [[ADD60:%.*]] = add nsw i32 [[TMP39]], 1
// CHECK28-NEXT: store i32 [[ADD60]], i32* [[A]], align 4, !llvm.access.group !13
// CHECK28-NEXT: [[TMP40:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !13
// CHECK28-NEXT: [[CONV61:%.*]] = sext i16 [[TMP40]] to i32
// CHECK28-NEXT: [[ADD62:%.*]] = add nsw i32 [[CONV61]], 1
// CHECK28-NEXT: [[CONV63:%.*]] = trunc i32 [[ADD62]] to i16
// CHECK28-NEXT: store i16 [[CONV63]], i16* [[AA]], align 2, !llvm.access.group !13
// CHECK28-NEXT: br label [[OMP_BODY_CONTINUE64:%.*]]
// CHECK28: omp.body.continue64:
// CHECK28-NEXT: br label [[OMP_INNER_FOR_INC65:%.*]]
// CHECK28: omp.inner.for.inc65:
// CHECK28-NEXT: [[TMP41:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13
// CHECK28-NEXT: [[ADD66:%.*]] = add nsw i32 [[TMP41]], 1
// CHECK28-NEXT: store i32 [[ADD66]], i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13
// CHECK28-NEXT: br label [[OMP_INNER_FOR_COND54]], !llvm.loop [[LOOP14:![0-9]+]]
// CHECK28: omp.inner.for.end67:
// CHECK28-NEXT: store i16 22, i16* [[IT53]], align 2
// CHECK28-NEXT: [[TMP42:%.*]] = load i32, i32* [[A]], align 4
// CHECK28-NEXT: store i32 [[TMP42]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK28-NEXT: store i32 0, i32* [[DOTOMP_LB69]], align 4
// CHECK28-NEXT: store i32 25, i32* [[DOTOMP_UB70]], align 4
// CHECK28-NEXT: [[TMP43:%.*]] = load i32, i32* [[DOTOMP_LB69]], align 4
// CHECK28-NEXT: store i32 [[TMP43]], i32* [[DOTOMP_IV71]], align 4
// CHECK28-NEXT: br label [[OMP_INNER_FOR_COND73:%.*]]
// CHECK28: omp.inner.for.cond73:
// CHECK28-NEXT: [[TMP44:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16
// CHECK28-NEXT: [[TMP45:%.*]] = load i32, i32* [[DOTOMP_UB70]], align 4, !llvm.access.group !16
// CHECK28-NEXT: [[CMP74:%.*]] = icmp sle i32 [[TMP44]], [[TMP45]]
// CHECK28-NEXT: br i1 [[CMP74]], label [[OMP_INNER_FOR_BODY75:%.*]], label [[OMP_INNER_FOR_END100:%.*]]
// CHECK28: omp.inner.for.body75:
// CHECK28-NEXT: [[TMP46:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16
// CHECK28-NEXT: [[MUL76:%.*]] = mul nsw i32 [[TMP46]], 1
// CHECK28-NEXT: [[SUB77:%.*]] = sub nsw i32 122, [[MUL76]]
// CHECK28-NEXT: [[CONV78:%.*]] = trunc i32 [[SUB77]] to i8
// CHECK28-NEXT: store i8 [[CONV78]], i8* [[IT72]], align 1, !llvm.access.group !16
// CHECK28-NEXT: [[TMP47:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !16
// CHECK28-NEXT: [[ADD79:%.*]] = add nsw i32 [[TMP47]], 1
// CHECK28-NEXT: store i32 [[ADD79]], i32* [[A]], align 4, !llvm.access.group !16
// CHECK28-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i32 0, i32 2
// CHECK28-NEXT: [[TMP48:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !16
// CHECK28-NEXT: [[CONV80:%.*]] = fpext float [[TMP48]] to double
// CHECK28-NEXT: [[ADD81:%.*]] = fadd double [[CONV80]], 1.000000e+00
// CHECK28-NEXT: [[CONV82:%.*]] = fptrunc double [[ADD81]] to float
// CHECK28-NEXT: store float [[CONV82]], float* [[ARRAYIDX]], align 4, !llvm.access.group !16
// CHECK28-NEXT: [[ARRAYIDX83:%.*]] = getelementptr inbounds float, float* [[VLA]], i32 3
// CHECK28-NEXT: [[TMP49:%.*]] = load float, float* [[ARRAYIDX83]], align 4, !llvm.access.group !16
// CHECK28-NEXT: [[CONV84:%.*]] = fpext float [[TMP49]] to double
// CHECK28-NEXT: [[ADD85:%.*]] = fadd double [[CONV84]], 1.000000e+00
// CHECK28-NEXT: [[CONV86:%.*]] = fptrunc double [[ADD85]] to float
// CHECK28-NEXT: store float [[CONV86]], float* [[ARRAYIDX83]], align 4, !llvm.access.group !16
// CHECK28-NEXT: [[ARRAYIDX87:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i32 0, i32 1
// CHECK28-NEXT: [[ARRAYIDX88:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX87]], i32 0, i32 2
// CHECK28-NEXT: [[TMP50:%.*]] = load double, double* [[ARRAYIDX88]], align 8, !llvm.access.group !16
// CHECK28-NEXT: [[ADD89:%.*]] = fadd double [[TMP50]], 1.000000e+00
// CHECK28-NEXT: store double [[ADD89]], double* [[ARRAYIDX88]], align 8, !llvm.access.group !16
// CHECK28-NEXT: [[TMP51:%.*]] = mul nsw i32 1, [[TMP2]]
// CHECK28-NEXT: [[ARRAYIDX90:%.*]] = getelementptr inbounds double, double* [[VLA1]], i32 [[TMP51]]
// CHECK28-NEXT: [[ARRAYIDX91:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX90]], i32 3
// CHECK28-NEXT: [[TMP52:%.*]] = load double, double* [[ARRAYIDX91]], align 8, !llvm.access.group !16
// CHECK28-NEXT: [[ADD92:%.*]] = fadd double [[TMP52]], 1.000000e+00
// CHECK28-NEXT: store double [[ADD92]], double* [[ARRAYIDX91]], align 8, !llvm.access.group !16
// CHECK28-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
// CHECK28-NEXT: [[TMP53:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group !16
// CHECK28-NEXT: [[ADD93:%.*]] = add nsw i64 [[TMP53]], 1
// CHECK28-NEXT: store i64 [[ADD93]], i64* [[X]], align 4, !llvm.access.group !16
// CHECK28-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
// CHECK28-NEXT: [[TMP54:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group !16
// CHECK28-NEXT: [[CONV94:%.*]] = sext i8 [[TMP54]] to i32
// CHECK28-NEXT: [[ADD95:%.*]] = add nsw i32 [[CONV94]], 1
// CHECK28-NEXT: [[CONV96:%.*]] = trunc i32 [[ADD95]] to i8
// CHECK28-NEXT: store i8 [[CONV96]], i8* [[Y]], align 4, !llvm.access.group !16
// CHECK28-NEXT: br label [[OMP_BODY_CONTINUE97:%.*]]
// CHECK28: omp.body.continue97:
// CHECK28-NEXT: br label [[OMP_INNER_FOR_INC98:%.*]]
// CHECK28: omp.inner.for.inc98:
// CHECK28-NEXT: [[TMP55:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16
// CHECK28-NEXT: [[ADD99:%.*]] = add nsw i32 [[TMP55]], 1
// CHECK28-NEXT: store i32 [[ADD99]], i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16
// CHECK28-NEXT: br label [[OMP_INNER_FOR_COND73]], !llvm.loop [[LOOP17:![0-9]+]]
// CHECK28: omp.inner.for.end100:
// CHECK28-NEXT: store i8 96, i8* [[IT72]], align 1
// CHECK28-NEXT: [[TMP56:%.*]] = load i32, i32* [[A]], align 4
// CHECK28-NEXT: [[TMP57:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK28-NEXT: call void @llvm.stackrestore(i8* [[TMP57]])
// CHECK28-NEXT: ret i32 [[TMP56]]
//
//
// CHECK28-LABEL: define {{[^@]+}}@_Z3bari
// CHECK28-SAME: (i32 [[N:%.*]]) #[[ATTR0]] {
// CHECK28-NEXT: entry:
// CHECK28-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4
// CHECK28-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK28-NEXT: store i32 0, i32* [[A]], align 4
// CHECK28-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK28-NEXT: [[CALL:%.*]] = call i32 @_Z3fooi(i32 [[TMP0]])
// CHECK28-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK28-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
// CHECK28-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK28-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK28-NEXT: [[CALL1:%.*]] = call i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 4 dereferenceable(8) [[S]], i32 [[TMP2]])
// CHECK28-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK28-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
// CHECK28-NEXT: store i32 [[ADD2]], i32* [[A]], align 4
// CHECK28-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK28-NEXT: [[CALL3:%.*]] = call i32 @_ZL7fstatici(i32 [[TMP4]])
// CHECK28-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4
// CHECK28-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
// CHECK28-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK28-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK28-NEXT: [[CALL5:%.*]] = call i32 @_Z9ftemplateIiET_i(i32 [[TMP6]])
// CHECK28-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK28-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
// CHECK28-NEXT: store i32 [[ADD6]], i32* [[A]], align 4
// CHECK28-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK28-NEXT: ret i32 [[TMP8]]
//
//
// CHECK28-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
// CHECK28-SAME: (%struct.S1* nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 [[N:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK28-NEXT: entry:
// CHECK28-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK28-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[B:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4
// CHECK28-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[TMP:%.*]] = alloca i64, align 4
// CHECK28-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK28-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK28-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK28-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK28-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK28-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK28-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK28-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK28-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK28-NEXT: store i32 [[ADD]], i32* [[B]], align 4
// CHECK28-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK28-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK28-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
// CHECK28-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]]
// CHECK28-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2
// CHECK28-NEXT: store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
// CHECK28-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK28-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK28-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK28-NEXT: store i64 [[TMP4]], i64* [[DOTOMP_IV]], align 8
// CHECK28-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK28: omp.inner.for.cond:
// CHECK28-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19
// CHECK28-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !19
// CHECK28-NEXT: [[CMP:%.*]] = icmp ule i64 [[TMP5]], [[TMP6]]
// CHECK28-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK28: omp.inner.for.body:
// CHECK28-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19
// CHECK28-NEXT: [[MUL:%.*]] = mul i64 [[TMP7]], 400
// CHECK28-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK28-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !19
// CHECK28-NEXT: [[TMP8:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group !19
// CHECK28-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP8]] to double
// CHECK28-NEXT: [[ADD2:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK28-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK28-NEXT: store double [[ADD2]], double* [[A]], align 4, !llvm.access.group !19
// CHECK28-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK28-NEXT: [[TMP9:%.*]] = load double, double* [[A3]], align 4, !llvm.access.group !19
// CHECK28-NEXT: [[INC:%.*]] = fadd double [[TMP9]], 1.000000e+00
// CHECK28-NEXT: store double [[INC]], double* [[A3]], align 4, !llvm.access.group !19
// CHECK28-NEXT: [[CONV4:%.*]] = fptosi double [[INC]] to i16
// CHECK28-NEXT: [[TMP10:%.*]] = mul nsw i32 1, [[TMP1]]
// CHECK28-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP10]]
// CHECK28-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
// CHECK28-NEXT: store i16 [[CONV4]], i16* [[ARRAYIDX5]], align 2, !llvm.access.group !19
// CHECK28-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK28: omp.body.continue:
// CHECK28-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK28: omp.inner.for.inc:
// CHECK28-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19
// CHECK28-NEXT: [[ADD6:%.*]] = add i64 [[TMP11]], 1
// CHECK28-NEXT: store i64 [[ADD6]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19
// CHECK28-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]]
// CHECK28: omp.inner.for.end:
// CHECK28-NEXT: store i64 400, i64* [[IT]], align 8
// CHECK28-NEXT: [[TMP12:%.*]] = mul nsw i32 1, [[TMP1]]
// CHECK28-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP12]]
// CHECK28-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX7]], i32 1
// CHECK28-NEXT: [[TMP13:%.*]] = load i16, i16* [[ARRAYIDX8]], align 2
// CHECK28-NEXT: [[CONV9:%.*]] = sext i16 [[TMP13]] to i32
// CHECK28-NEXT: [[TMP14:%.*]] = load i32, i32* [[B]], align 4
// CHECK28-NEXT: [[ADD10:%.*]] = add nsw i32 [[CONV9]], [[TMP14]]
// CHECK28-NEXT: [[TMP15:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK28-NEXT: call void @llvm.stackrestore(i8* [[TMP15]])
// CHECK28-NEXT: ret i32 [[ADD10]]
//
//
// CHECK28-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK28-SAME: (i32 [[N:%.*]]) #[[ATTR0]] {
// CHECK28-NEXT: entry:
// CHECK28-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK28-NEXT: [[AAA:%.*]] = alloca i8, align 1
// CHECK28-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK28-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK28-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK28-NEXT: store i32 0, i32* [[A]], align 4
// CHECK28-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK28-NEXT: store i8 0, i8* [[AAA]], align 1
// CHECK28-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK28-NEXT: store i32 429496720, i32* [[DOTOMP_UB]], align 4
// CHECK28-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK28-NEXT: ret i32 [[TMP0]]
//
//
// CHECK28-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK28-SAME: (i32 [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK28-NEXT: entry:
// CHECK28-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK28-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK28-NEXT: [[TMP:%.*]] = alloca i64, align 4
// CHECK28-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK28-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK28-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK28-NEXT: [[I:%.*]] = alloca i64, align 8
// CHECK28-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK28-NEXT: store i32 0, i32* [[A]], align 4
// CHECK28-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK28-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK28-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8
// CHECK28-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK28-NEXT: store i64 [[TMP0]], i64* [[DOTOMP_IV]], align 8
// CHECK28-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK28: omp.inner.for.cond:
// CHECK28-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !22
// CHECK28-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !22
// CHECK28-NEXT: [[CMP:%.*]] = icmp sle i64 [[TMP1]], [[TMP2]]
// CHECK28-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK28: omp.inner.for.body:
// CHECK28-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !22
// CHECK28-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP3]], 3
// CHECK28-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
// CHECK28-NEXT: store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !22
// CHECK28-NEXT: [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !22
// CHECK28-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK28-NEXT: store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group !22
// CHECK28-NEXT: [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !22
// CHECK28-NEXT: [[CONV:%.*]] = sext i16 [[TMP5]] to i32
// CHECK28-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
// CHECK28-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
// CHECK28-NEXT: store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group !22
// CHECK28-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i32 0, i32 2
// CHECK28-NEXT: [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !22
// CHECK28-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK28-NEXT: store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !22
// CHECK28-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK28: omp.body.continue:
// CHECK28-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK28: omp.inner.for.inc:
// CHECK28-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !22
// CHECK28-NEXT: [[ADD5:%.*]] = add nsw i64 [[TMP7]], 1
// CHECK28-NEXT: store i64 [[ADD5]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !22
// CHECK28-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]]
// CHECK28: omp.inner.for.end:
// CHECK28-NEXT: store i64 11, i64* [[I]], align 8
// CHECK28-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK28-NEXT: ret i32 [[TMP8]]
//
//
// CHECK29-LABEL: define {{[^@]+}}@_Z7get_valv
// CHECK29-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK29-NEXT: entry:
// CHECK29-NEXT: ret i64 0
//
//
// CHECK29-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK29-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] {
// CHECK29-NEXT: entry:
// CHECK29-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK29-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK29-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK29-NEXT: [[B:%.*]] = alloca [10 x float], align 4
// CHECK29-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK29-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK29-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK29-NEXT: [[__VLA_EXPR1:%.*]] = alloca i64, align 8
// CHECK29-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8
// CHECK29-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK29-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK29-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK29-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK29-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK29-NEXT: [[K:%.*]] = alloca i64, align 8
// CHECK29-NEXT: [[_TMP3:%.*]] = alloca i32, align 4
// CHECK29-NEXT: [[DOTOMP_LB4:%.*]] = alloca i32, align 4
// CHECK29-NEXT: [[DOTOMP_UB5:%.*]] = alloca i32, align 4
// CHECK29-NEXT: [[DOTOMP_IV6:%.*]] = alloca i32, align 4
// CHECK29-NEXT: [[DOTLINEAR_START:%.*]] = alloca i64, align 8
// CHECK29-NEXT: [[I7:%.*]] = alloca i32, align 4
// CHECK29-NEXT: [[K8:%.*]] = alloca i64, align 8
// CHECK29-NEXT: [[LIN:%.*]] = alloca i32, align 4
// CHECK29-NEXT: [[_TMP20:%.*]] = alloca i64, align 8
// CHECK29-NEXT: [[DOTOMP_LB21:%.*]] = alloca i64, align 8
// CHECK29-NEXT: [[DOTOMP_UB22:%.*]] = alloca i64, align 8
// CHECK29-NEXT: [[DOTOMP_IV23:%.*]] = alloca i64, align 8
// CHECK29-NEXT: [[DOTLINEAR_START24:%.*]] = alloca i32, align 4
// CHECK29-NEXT: [[DOTLINEAR_START25:%.*]] = alloca i32, align 4
26056 // CHECK29-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8 26057 // CHECK29-NEXT: [[IT:%.*]] = alloca i64, align 8 26058 // CHECK29-NEXT: [[LIN27:%.*]] = alloca i32, align 4 26059 // CHECK29-NEXT: [[A28:%.*]] = alloca i32, align 4 26060 // CHECK29-NEXT: [[_TMP49:%.*]] = alloca i16, align 2 26061 // CHECK29-NEXT: [[DOTOMP_LB50:%.*]] = alloca i32, align 4 26062 // CHECK29-NEXT: [[DOTOMP_UB51:%.*]] = alloca i32, align 4 26063 // CHECK29-NEXT: [[DOTOMP_IV52:%.*]] = alloca i32, align 4 26064 // CHECK29-NEXT: [[IT53:%.*]] = alloca i16, align 2 26065 // CHECK29-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 26066 // CHECK29-NEXT: [[_TMP68:%.*]] = alloca i8, align 1 26067 // CHECK29-NEXT: [[DOTOMP_LB69:%.*]] = alloca i32, align 4 26068 // CHECK29-NEXT: [[DOTOMP_UB70:%.*]] = alloca i32, align 4 26069 // CHECK29-NEXT: [[DOTOMP_IV71:%.*]] = alloca i32, align 4 26070 // CHECK29-NEXT: [[IT72:%.*]] = alloca i8, align 1 26071 // CHECK29-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 26072 // CHECK29-NEXT: store i32 0, i32* [[A]], align 4 26073 // CHECK29-NEXT: store i16 0, i16* [[AA]], align 2 26074 // CHECK29-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 26075 // CHECK29-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 26076 // CHECK29-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave() 26077 // CHECK29-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8 26078 // CHECK29-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4 26079 // CHECK29-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8 26080 // CHECK29-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4 26081 // CHECK29-NEXT: [[TMP4:%.*]] = zext i32 [[TMP3]] to i64 26082 // CHECK29-NEXT: [[TMP5:%.*]] = mul nuw i64 5, [[TMP4]] 26083 // CHECK29-NEXT: [[VLA1:%.*]] = alloca double, i64 [[TMP5]], align 8 26084 // CHECK29-NEXT: store i64 [[TMP4]], i64* [[__VLA_EXPR1]], align 8 26085 // CHECK29-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 26086 // CHECK29-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4 26087 // CHECK29-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 26088 // CHECK29-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 26089 // CHECK29-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 26090 // CHECK29: omp.inner.for.cond: 26091 // CHECK29-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 26092 // CHECK29-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !2 26093 // CHECK29-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 26094 // CHECK29-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 26095 // CHECK29: omp.inner.for.body: 26096 // CHECK29-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 26097 // CHECK29-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 5 26098 // CHECK29-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]] 26099 // CHECK29-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !2 26100 // CHECK29-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 26101 // CHECK29: omp.body.continue: 26102 // CHECK29-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 26103 // CHECK29: omp.inner.for.inc: 26104 // CHECK29-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 26105 // CHECK29-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], 1 26106 // CHECK29-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 26107 // CHECK29-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] 26108 // CHECK29: omp.inner.for.end: 26109 // 
CHECK29-NEXT: store i32 33, i32* [[I]], align 4 26110 // CHECK29-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv() 26111 // CHECK29-NEXT: store i64 [[CALL]], i64* [[K]], align 8 26112 // CHECK29-NEXT: store i32 0, i32* [[DOTOMP_LB4]], align 4 26113 // CHECK29-NEXT: store i32 8, i32* [[DOTOMP_UB5]], align 4 26114 // CHECK29-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB4]], align 4 26115 // CHECK29-NEXT: store i32 [[TMP11]], i32* [[DOTOMP_IV6]], align 4 26116 // CHECK29-NEXT: [[TMP12:%.*]] = load i64, i64* [[K]], align 8 26117 // CHECK29-NEXT: store i64 [[TMP12]], i64* [[DOTLINEAR_START]], align 8 26118 // CHECK29-NEXT: br label [[OMP_INNER_FOR_COND9:%.*]] 26119 // CHECK29: omp.inner.for.cond9: 26120 // CHECK29-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6 26121 // CHECK29-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB5]], align 4, !llvm.access.group !6 26122 // CHECK29-NEXT: [[CMP10:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]] 26123 // CHECK29-NEXT: br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END19:%.*]] 26124 // CHECK29: omp.inner.for.body11: 26125 // CHECK29-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6 26126 // CHECK29-NEXT: [[MUL12:%.*]] = mul nsw i32 [[TMP15]], 1 26127 // CHECK29-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[MUL12]] 26128 // CHECK29-NEXT: store i32 [[SUB]], i32* [[I7]], align 4, !llvm.access.group !6 26129 // CHECK29-NEXT: [[TMP16:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !6 26130 // CHECK29-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6 26131 // CHECK29-NEXT: [[MUL13:%.*]] = mul nsw i32 [[TMP17]], 3 26132 // CHECK29-NEXT: [[CONV:%.*]] = sext i32 [[MUL13]] to i64 26133 // CHECK29-NEXT: [[ADD14:%.*]] = add nsw i64 [[TMP16]], [[CONV]] 26134 // CHECK29-NEXT: store i64 [[ADD14]], i64* [[K8]], align 8, !llvm.access.group !6 26135 // CHECK29-NEXT: [[TMP18:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !6 26136 // CHECK29-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP18]], 1 26137 // CHECK29-NEXT: store i32 [[ADD15]], i32* [[A]], align 4, !llvm.access.group !6 26138 // CHECK29-NEXT: br label [[OMP_BODY_CONTINUE16:%.*]] 26139 // CHECK29: omp.body.continue16: 26140 // CHECK29-NEXT: br label [[OMP_INNER_FOR_INC17:%.*]] 26141 // CHECK29: omp.inner.for.inc17: 26142 // CHECK29-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6 26143 // CHECK29-NEXT: [[ADD18:%.*]] = add nsw i32 [[TMP19]], 1 26144 // CHECK29-NEXT: store i32 [[ADD18]], i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6 26145 // CHECK29-NEXT: br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP7:![0-9]+]] 26146 // CHECK29: omp.inner.for.end19: 26147 // CHECK29-NEXT: store i32 1, i32* [[I7]], align 4 26148 // CHECK29-NEXT: [[TMP20:%.*]] = load i64, i64* [[K8]], align 8 26149 // CHECK29-NEXT: store i64 [[TMP20]], i64* [[K]], align 8 26150 // CHECK29-NEXT: store i32 12, i32* [[LIN]], align 4 26151 // CHECK29-NEXT: store i64 0, i64* [[DOTOMP_LB21]], align 8 26152 // CHECK29-NEXT: store i64 3, i64* [[DOTOMP_UB22]], align 8 26153 // CHECK29-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTOMP_LB21]], align 8 26154 // CHECK29-NEXT: store i64 [[TMP21]], i64* [[DOTOMP_IV23]], align 8 26155 // CHECK29-NEXT: [[TMP22:%.*]] = load i32, i32* [[LIN]], align 4 26156 // CHECK29-NEXT: store i32 [[TMP22]], i32* [[DOTLINEAR_START24]], align 4 26157 // CHECK29-NEXT: [[TMP23:%.*]] = load i32, i32* [[A]], align 4 26158 // CHECK29-NEXT: store i32 
[[TMP23]], i32* [[DOTLINEAR_START25]], align 4 26159 // CHECK29-NEXT: [[CALL26:%.*]] = call i64 @_Z7get_valv() 26160 // CHECK29-NEXT: store i64 [[CALL26]], i64* [[DOTLINEAR_STEP]], align 8 26161 // CHECK29-NEXT: br label [[OMP_INNER_FOR_COND29:%.*]] 26162 // CHECK29: omp.inner.for.cond29: 26163 // CHECK29-NEXT: [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9 26164 // CHECK29-NEXT: [[TMP25:%.*]] = load i64, i64* [[DOTOMP_UB22]], align 8, !llvm.access.group !9 26165 // CHECK29-NEXT: [[CMP30:%.*]] = icmp ule i64 [[TMP24]], [[TMP25]] 26166 // CHECK29-NEXT: br i1 [[CMP30]], label [[OMP_INNER_FOR_BODY31:%.*]], label [[OMP_INNER_FOR_END48:%.*]] 26167 // CHECK29: omp.inner.for.body31: 26168 // CHECK29-NEXT: [[TMP26:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9 26169 // CHECK29-NEXT: [[MUL32:%.*]] = mul i64 [[TMP26]], 400 26170 // CHECK29-NEXT: [[SUB33:%.*]] = sub i64 2000, [[MUL32]] 26171 // CHECK29-NEXT: store i64 [[SUB33]], i64* [[IT]], align 8, !llvm.access.group !9 26172 // CHECK29-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTLINEAR_START24]], align 4, !llvm.access.group !9 26173 // CHECK29-NEXT: [[CONV34:%.*]] = sext i32 [[TMP27]] to i64 26174 // CHECK29-NEXT: [[TMP28:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9 26175 // CHECK29-NEXT: [[TMP29:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !9 26176 // CHECK29-NEXT: [[MUL35:%.*]] = mul i64 [[TMP28]], [[TMP29]] 26177 // CHECK29-NEXT: [[ADD36:%.*]] = add i64 [[CONV34]], [[MUL35]] 26178 // CHECK29-NEXT: [[CONV37:%.*]] = trunc i64 [[ADD36]] to i32 26179 // CHECK29-NEXT: store i32 [[CONV37]], i32* [[LIN27]], align 4, !llvm.access.group !9 26180 // CHECK29-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTLINEAR_START25]], align 4, !llvm.access.group !9 26181 // CHECK29-NEXT: [[CONV38:%.*]] = sext i32 [[TMP30]] to i64 26182 // CHECK29-NEXT: [[TMP31:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9 26183 // CHECK29-NEXT: [[TMP32:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !9 26184 // CHECK29-NEXT: [[MUL39:%.*]] = mul i64 [[TMP31]], [[TMP32]] 26185 // CHECK29-NEXT: [[ADD40:%.*]] = add i64 [[CONV38]], [[MUL39]] 26186 // CHECK29-NEXT: [[CONV41:%.*]] = trunc i64 [[ADD40]] to i32 26187 // CHECK29-NEXT: store i32 [[CONV41]], i32* [[A28]], align 4, !llvm.access.group !9 26188 // CHECK29-NEXT: [[TMP33:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !9 26189 // CHECK29-NEXT: [[CONV42:%.*]] = sext i16 [[TMP33]] to i32 26190 // CHECK29-NEXT: [[ADD43:%.*]] = add nsw i32 [[CONV42]], 1 26191 // CHECK29-NEXT: [[CONV44:%.*]] = trunc i32 [[ADD43]] to i16 26192 // CHECK29-NEXT: store i16 [[CONV44]], i16* [[AA]], align 2, !llvm.access.group !9 26193 // CHECK29-NEXT: br label [[OMP_BODY_CONTINUE45:%.*]] 26194 // CHECK29: omp.body.continue45: 26195 // CHECK29-NEXT: br label [[OMP_INNER_FOR_INC46:%.*]] 26196 // CHECK29: omp.inner.for.inc46: 26197 // CHECK29-NEXT: [[TMP34:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9 26198 // CHECK29-NEXT: [[ADD47:%.*]] = add i64 [[TMP34]], 1 26199 // CHECK29-NEXT: store i64 [[ADD47]], i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9 26200 // CHECK29-NEXT: br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP10:![0-9]+]] 26201 // CHECK29: omp.inner.for.end48: 26202 // CHECK29-NEXT: store i64 400, i64* [[IT]], align 8 26203 // CHECK29-NEXT: [[TMP35:%.*]] = load i32, i32* [[LIN27]], align 4 26204 // CHECK29-NEXT: store i32 [[TMP35]], i32* [[LIN]], align 4 26205 // 
CHECK29-NEXT: [[TMP36:%.*]] = load i32, i32* [[A28]], align 4 26206 // CHECK29-NEXT: store i32 [[TMP36]], i32* [[A]], align 4 26207 // CHECK29-NEXT: store i32 0, i32* [[DOTOMP_LB50]], align 4 26208 // CHECK29-NEXT: store i32 3, i32* [[DOTOMP_UB51]], align 4 26209 // CHECK29-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTOMP_LB50]], align 4 26210 // CHECK29-NEXT: store i32 [[TMP37]], i32* [[DOTOMP_IV52]], align 4 26211 // CHECK29-NEXT: br label [[OMP_INNER_FOR_COND54:%.*]] 26212 // CHECK29: omp.inner.for.cond54: 26213 // CHECK29-NEXT: [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12 26214 // CHECK29-NEXT: [[TMP39:%.*]] = load i32, i32* [[DOTOMP_UB51]], align 4, !llvm.access.group !12 26215 // CHECK29-NEXT: [[CMP55:%.*]] = icmp sle i32 [[TMP38]], [[TMP39]] 26216 // CHECK29-NEXT: br i1 [[CMP55]], label [[OMP_INNER_FOR_BODY56:%.*]], label [[OMP_INNER_FOR_END67:%.*]] 26217 // CHECK29: omp.inner.for.body56: 26218 // CHECK29-NEXT: [[TMP40:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12 26219 // CHECK29-NEXT: [[MUL57:%.*]] = mul nsw i32 [[TMP40]], 4 26220 // CHECK29-NEXT: [[ADD58:%.*]] = add nsw i32 6, [[MUL57]] 26221 // CHECK29-NEXT: [[CONV59:%.*]] = trunc i32 [[ADD58]] to i16 26222 // CHECK29-NEXT: store i16 [[CONV59]], i16* [[IT53]], align 2, !llvm.access.group !12 26223 // CHECK29-NEXT: [[TMP41:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !12 26224 // CHECK29-NEXT: [[ADD60:%.*]] = add nsw i32 [[TMP41]], 1 26225 // CHECK29-NEXT: store i32 [[ADD60]], i32* [[A]], align 4, !llvm.access.group !12 26226 // CHECK29-NEXT: [[TMP42:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !12 26227 // CHECK29-NEXT: [[CONV61:%.*]] = sext i16 [[TMP42]] to i32 26228 // CHECK29-NEXT: [[ADD62:%.*]] = add nsw i32 [[CONV61]], 1 26229 // CHECK29-NEXT: [[CONV63:%.*]] = trunc i32 [[ADD62]] to i16 26230 // CHECK29-NEXT: store i16 [[CONV63]], i16* [[AA]], align 2, !llvm.access.group !12 26231 // CHECK29-NEXT: br label [[OMP_BODY_CONTINUE64:%.*]] 26232 // CHECK29: omp.body.continue64: 26233 // CHECK29-NEXT: br label [[OMP_INNER_FOR_INC65:%.*]] 26234 // CHECK29: omp.inner.for.inc65: 26235 // CHECK29-NEXT: [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12 26236 // CHECK29-NEXT: [[ADD66:%.*]] = add nsw i32 [[TMP43]], 1 26237 // CHECK29-NEXT: store i32 [[ADD66]], i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12 26238 // CHECK29-NEXT: br label [[OMP_INNER_FOR_COND54]], !llvm.loop [[LOOP13:![0-9]+]] 26239 // CHECK29: omp.inner.for.end67: 26240 // CHECK29-NEXT: store i16 22, i16* [[IT53]], align 2 26241 // CHECK29-NEXT: [[TMP44:%.*]] = load i32, i32* [[A]], align 4 26242 // CHECK29-NEXT: store i32 [[TMP44]], i32* [[DOTCAPTURE_EXPR_]], align 4 26243 // CHECK29-NEXT: store i32 0, i32* [[DOTOMP_LB69]], align 4 26244 // CHECK29-NEXT: store i32 25, i32* [[DOTOMP_UB70]], align 4 26245 // CHECK29-NEXT: [[TMP45:%.*]] = load i32, i32* [[DOTOMP_LB69]], align 4 26246 // CHECK29-NEXT: store i32 [[TMP45]], i32* [[DOTOMP_IV71]], align 4 26247 // CHECK29-NEXT: br label [[OMP_INNER_FOR_COND73:%.*]] 26248 // CHECK29: omp.inner.for.cond73: 26249 // CHECK29-NEXT: [[TMP46:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15 26250 // CHECK29-NEXT: [[TMP47:%.*]] = load i32, i32* [[DOTOMP_UB70]], align 4, !llvm.access.group !15 26251 // CHECK29-NEXT: [[CMP74:%.*]] = icmp sle i32 [[TMP46]], [[TMP47]] 26252 // CHECK29-NEXT: br i1 [[CMP74]], label [[OMP_INNER_FOR_BODY75:%.*]], label [[OMP_INNER_FOR_END100:%.*]] 26253 // CHECK29: 
omp.inner.for.body75: 26254 // CHECK29-NEXT: [[TMP48:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15 26255 // CHECK29-NEXT: [[MUL76:%.*]] = mul nsw i32 [[TMP48]], 1 26256 // CHECK29-NEXT: [[SUB77:%.*]] = sub nsw i32 122, [[MUL76]] 26257 // CHECK29-NEXT: [[CONV78:%.*]] = trunc i32 [[SUB77]] to i8 26258 // CHECK29-NEXT: store i8 [[CONV78]], i8* [[IT72]], align 1, !llvm.access.group !15 26259 // CHECK29-NEXT: [[TMP49:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !15 26260 // CHECK29-NEXT: [[ADD79:%.*]] = add nsw i32 [[TMP49]], 1 26261 // CHECK29-NEXT: store i32 [[ADD79]], i32* [[A]], align 4, !llvm.access.group !15 26262 // CHECK29-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i64 0, i64 2 26263 // CHECK29-NEXT: [[TMP50:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !15 26264 // CHECK29-NEXT: [[CONV80:%.*]] = fpext float [[TMP50]] to double 26265 // CHECK29-NEXT: [[ADD81:%.*]] = fadd double [[CONV80]], 1.000000e+00 26266 // CHECK29-NEXT: [[CONV82:%.*]] = fptrunc double [[ADD81]] to float 26267 // CHECK29-NEXT: store float [[CONV82]], float* [[ARRAYIDX]], align 4, !llvm.access.group !15 26268 // CHECK29-NEXT: [[ARRAYIDX83:%.*]] = getelementptr inbounds float, float* [[VLA]], i64 3 26269 // CHECK29-NEXT: [[TMP51:%.*]] = load float, float* [[ARRAYIDX83]], align 4, !llvm.access.group !15 26270 // CHECK29-NEXT: [[CONV84:%.*]] = fpext float [[TMP51]] to double 26271 // CHECK29-NEXT: [[ADD85:%.*]] = fadd double [[CONV84]], 1.000000e+00 26272 // CHECK29-NEXT: [[CONV86:%.*]] = fptrunc double [[ADD85]] to float 26273 // CHECK29-NEXT: store float [[CONV86]], float* [[ARRAYIDX83]], align 4, !llvm.access.group !15 26274 // CHECK29-NEXT: [[ARRAYIDX87:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i64 0, i64 1 26275 // CHECK29-NEXT: [[ARRAYIDX88:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX87]], i64 0, i64 2 26276 // CHECK29-NEXT: [[TMP52:%.*]] = load double, double* [[ARRAYIDX88]], align 8, !llvm.access.group !15 26277 // CHECK29-NEXT: [[ADD89:%.*]] = fadd double [[TMP52]], 1.000000e+00 26278 // CHECK29-NEXT: store double [[ADD89]], double* [[ARRAYIDX88]], align 8, !llvm.access.group !15 26279 // CHECK29-NEXT: [[TMP53:%.*]] = mul nsw i64 1, [[TMP4]] 26280 // CHECK29-NEXT: [[ARRAYIDX90:%.*]] = getelementptr inbounds double, double* [[VLA1]], i64 [[TMP53]] 26281 // CHECK29-NEXT: [[ARRAYIDX91:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX90]], i64 3 26282 // CHECK29-NEXT: [[TMP54:%.*]] = load double, double* [[ARRAYIDX91]], align 8, !llvm.access.group !15 26283 // CHECK29-NEXT: [[ADD92:%.*]] = fadd double [[TMP54]], 1.000000e+00 26284 // CHECK29-NEXT: store double [[ADD92]], double* [[ARRAYIDX91]], align 8, !llvm.access.group !15 26285 // CHECK29-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0 26286 // CHECK29-NEXT: [[TMP55:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group !15 26287 // CHECK29-NEXT: [[ADD93:%.*]] = add nsw i64 [[TMP55]], 1 26288 // CHECK29-NEXT: store i64 [[ADD93]], i64* [[X]], align 8, !llvm.access.group !15 26289 // CHECK29-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1 26290 // CHECK29-NEXT: [[TMP56:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group !15 26291 // CHECK29-NEXT: [[CONV94:%.*]] = sext i8 [[TMP56]] to i32 26292 // CHECK29-NEXT: [[ADD95:%.*]] = add nsw i32 [[CONV94]], 1 26293 // CHECK29-NEXT: [[CONV96:%.*]] = trunc i32 
[[ADD95]] to i8 26294 // CHECK29-NEXT: store i8 [[CONV96]], i8* [[Y]], align 8, !llvm.access.group !15 26295 // CHECK29-NEXT: br label [[OMP_BODY_CONTINUE97:%.*]] 26296 // CHECK29: omp.body.continue97: 26297 // CHECK29-NEXT: br label [[OMP_INNER_FOR_INC98:%.*]] 26298 // CHECK29: omp.inner.for.inc98: 26299 // CHECK29-NEXT: [[TMP57:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15 26300 // CHECK29-NEXT: [[ADD99:%.*]] = add nsw i32 [[TMP57]], 1 26301 // CHECK29-NEXT: store i32 [[ADD99]], i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15 26302 // CHECK29-NEXT: br label [[OMP_INNER_FOR_COND73]], !llvm.loop [[LOOP16:![0-9]+]] 26303 // CHECK29: omp.inner.for.end100: 26304 // CHECK29-NEXT: store i8 96, i8* [[IT72]], align 1 26305 // CHECK29-NEXT: [[TMP58:%.*]] = load i32, i32* [[A]], align 4 26306 // CHECK29-NEXT: [[TMP59:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 26307 // CHECK29-NEXT: call void @llvm.stackrestore(i8* [[TMP59]]) 26308 // CHECK29-NEXT: ret i32 [[TMP58]] 26309 // 26310 // 26311 // CHECK29-LABEL: define {{[^@]+}}@_Z3bari 26312 // CHECK29-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] { 26313 // CHECK29-NEXT: entry: 26314 // CHECK29-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 26315 // CHECK29-NEXT: [[A:%.*]] = alloca i32, align 4 26316 // CHECK29-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8 26317 // CHECK29-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 26318 // CHECK29-NEXT: store i32 0, i32* [[A]], align 4 26319 // CHECK29-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 26320 // CHECK29-NEXT: [[CALL:%.*]] = call signext i32 @_Z3fooi(i32 signext [[TMP0]]) 26321 // CHECK29-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4 26322 // CHECK29-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] 26323 // CHECK29-NEXT: store i32 [[ADD]], i32* [[A]], align 4 26324 // CHECK29-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 26325 // CHECK29-NEXT: [[CALL1:%.*]] = call signext i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 8 dereferenceable(8) [[S]], i32 signext [[TMP2]]) 26326 // CHECK29-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 26327 // CHECK29-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] 26328 // CHECK29-NEXT: store i32 [[ADD2]], i32* [[A]], align 4 26329 // CHECK29-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 26330 // CHECK29-NEXT: [[CALL3:%.*]] = call signext i32 @_ZL7fstatici(i32 signext [[TMP4]]) 26331 // CHECK29-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4 26332 // CHECK29-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] 26333 // CHECK29-NEXT: store i32 [[ADD4]], i32* [[A]], align 4 26334 // CHECK29-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4 26335 // CHECK29-NEXT: [[CALL5:%.*]] = call signext i32 @_Z9ftemplateIiET_i(i32 signext [[TMP6]]) 26336 // CHECK29-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4 26337 // CHECK29-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]] 26338 // CHECK29-NEXT: store i32 [[ADD6]], i32* [[A]], align 4 26339 // CHECK29-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4 26340 // CHECK29-NEXT: ret i32 [[TMP8]] 26341 // 26342 // 26343 // CHECK29-LABEL: define {{[^@]+}}@_ZN2S12r1Ei 26344 // CHECK29-SAME: (%struct.S1* nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 signext [[N:%.*]]) #[[ATTR0]] comdat align 2 { 26345 // CHECK29-NEXT: entry: 26346 // CHECK29-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 26347 // CHECK29-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 26348 // CHECK29-NEXT: [[B:%.*]] = alloca i32, align 4 26349 // CHECK29-NEXT: 
[[SAVED_STACK:%.*]] = alloca i8*, align 8 26350 // CHECK29-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8 26351 // CHECK29-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1 26352 // CHECK29-NEXT: [[TMP:%.*]] = alloca i64, align 8 26353 // CHECK29-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 26354 // CHECK29-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 26355 // CHECK29-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 26356 // CHECK29-NEXT: [[IT:%.*]] = alloca i64, align 8 26357 // CHECK29-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 26358 // CHECK29-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 26359 // CHECK29-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 26360 // CHECK29-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 26361 // CHECK29-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 26362 // CHECK29-NEXT: store i32 [[ADD]], i32* [[B]], align 4 26363 // CHECK29-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 26364 // CHECK29-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 26365 // CHECK29-NEXT: [[TMP3:%.*]] = call i8* @llvm.stacksave() 26366 // CHECK29-NEXT: store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8 26367 // CHECK29-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]] 26368 // CHECK29-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2 26369 // CHECK29-NEXT: store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8 26370 // CHECK29-NEXT: [[TMP5:%.*]] = load i32, i32* [[N_ADDR]], align 4 26371 // CHECK29-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 60 26372 // CHECK29-NEXT: [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8 26373 // CHECK29-NEXT: store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_]], align 1 26374 // CHECK29-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 26375 // CHECK29-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8 26376 // CHECK29-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 26377 // CHECK29-NEXT: store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8 26378 // CHECK29-NEXT: [[TMP7:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1 26379 // CHECK29-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP7]] to i1 26380 // CHECK29-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 26381 // CHECK29: omp_if.then: 26382 // CHECK29-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 26383 // CHECK29: omp.inner.for.cond: 26384 // CHECK29-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18 26385 // CHECK29-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !18 26386 // CHECK29-NEXT: [[CMP2:%.*]] = icmp ule i64 [[TMP8]], [[TMP9]] 26387 // CHECK29-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 26388 // CHECK29: omp.inner.for.body: 26389 // CHECK29-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18 26390 // CHECK29-NEXT: [[MUL:%.*]] = mul i64 [[TMP10]], 400 26391 // CHECK29-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] 26392 // CHECK29-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !18 26393 // CHECK29-NEXT: [[TMP11:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group !18 26394 // CHECK29-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP11]] to double 26395 // CHECK29-NEXT: [[ADD3:%.*]] = fadd double [[CONV]], 1.500000e+00 26396 // CHECK29-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0 26397 // CHECK29-NEXT: store double [[ADD3]], double* [[A]], align 8, !nontemporal !19, !llvm.access.group !18 26398 // CHECK29-NEXT: [[A4:%.*]] = getelementptr 
inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 26399 // CHECK29-NEXT: [[TMP12:%.*]] = load double, double* [[A4]], align 8, !nontemporal !19, !llvm.access.group !18 26400 // CHECK29-NEXT: [[INC:%.*]] = fadd double [[TMP12]], 1.000000e+00 26401 // CHECK29-NEXT: store double [[INC]], double* [[A4]], align 8, !nontemporal !19, !llvm.access.group !18 26402 // CHECK29-NEXT: [[CONV5:%.*]] = fptosi double [[INC]] to i16 26403 // CHECK29-NEXT: [[TMP13:%.*]] = mul nsw i64 1, [[TMP2]] 26404 // CHECK29-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP13]] 26405 // CHECK29-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1 26406 // CHECK29-NEXT: store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2, !llvm.access.group !18 26407 // CHECK29-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 26408 // CHECK29: omp.body.continue: 26409 // CHECK29-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 26410 // CHECK29: omp.inner.for.inc: 26411 // CHECK29-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18 26412 // CHECK29-NEXT: [[ADD7:%.*]] = add i64 [[TMP14]], 1 26413 // CHECK29-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18 26414 // CHECK29-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] 26415 // CHECK29: omp.inner.for.end: 26416 // CHECK29-NEXT: br label [[OMP_IF_END:%.*]] 26417 // CHECK29: omp_if.else: 26418 // CHECK29-NEXT: br label [[OMP_INNER_FOR_COND8:%.*]] 26419 // CHECK29: omp.inner.for.cond8: 26420 // CHECK29-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 26421 // CHECK29-NEXT: [[TMP16:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 26422 // CHECK29-NEXT: [[CMP9:%.*]] = icmp ule i64 [[TMP15]], [[TMP16]] 26423 // CHECK29-NEXT: br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY10:%.*]], label [[OMP_INNER_FOR_END24:%.*]] 26424 // CHECK29: omp.inner.for.body10: 26425 // CHECK29-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 26426 // CHECK29-NEXT: [[MUL11:%.*]] = mul i64 [[TMP17]], 400 26427 // CHECK29-NEXT: [[SUB12:%.*]] = sub i64 2000, [[MUL11]] 26428 // CHECK29-NEXT: store i64 [[SUB12]], i64* [[IT]], align 8 26429 // CHECK29-NEXT: [[TMP18:%.*]] = load i32, i32* [[B]], align 4 26430 // CHECK29-NEXT: [[CONV13:%.*]] = sitofp i32 [[TMP18]] to double 26431 // CHECK29-NEXT: [[ADD14:%.*]] = fadd double [[CONV13]], 1.500000e+00 26432 // CHECK29-NEXT: [[A15:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 26433 // CHECK29-NEXT: store double [[ADD14]], double* [[A15]], align 8 26434 // CHECK29-NEXT: [[A16:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 26435 // CHECK29-NEXT: [[TMP19:%.*]] = load double, double* [[A16]], align 8 26436 // CHECK29-NEXT: [[INC17:%.*]] = fadd double [[TMP19]], 1.000000e+00 26437 // CHECK29-NEXT: store double [[INC17]], double* [[A16]], align 8 26438 // CHECK29-NEXT: [[CONV18:%.*]] = fptosi double [[INC17]] to i16 26439 // CHECK29-NEXT: [[TMP20:%.*]] = mul nsw i64 1, [[TMP2]] 26440 // CHECK29-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP20]] 26441 // CHECK29-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX19]], i64 1 26442 // CHECK29-NEXT: store i16 [[CONV18]], i16* [[ARRAYIDX20]], align 2 26443 // CHECK29-NEXT: br label [[OMP_BODY_CONTINUE21:%.*]] 26444 // CHECK29: omp.body.continue21: 26445 // CHECK29-NEXT: br label [[OMP_INNER_FOR_INC22:%.*]] 26446 // CHECK29: omp.inner.for.inc22: 26447 // CHECK29-NEXT: [[TMP21:%.*]] = 
load i64, i64* [[DOTOMP_IV]], align 8 26448 // CHECK29-NEXT: [[ADD23:%.*]] = add i64 [[TMP21]], 1 26449 // CHECK29-NEXT: store i64 [[ADD23]], i64* [[DOTOMP_IV]], align 8 26450 // CHECK29-NEXT: br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP22:![0-9]+]] 26451 // CHECK29: omp.inner.for.end24: 26452 // CHECK29-NEXT: br label [[OMP_IF_END]] 26453 // CHECK29: omp_if.end: 26454 // CHECK29-NEXT: store i64 400, i64* [[IT]], align 8 26455 // CHECK29-NEXT: [[TMP22:%.*]] = mul nsw i64 1, [[TMP2]] 26456 // CHECK29-NEXT: [[ARRAYIDX25:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP22]] 26457 // CHECK29-NEXT: [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX25]], i64 1 26458 // CHECK29-NEXT: [[TMP23:%.*]] = load i16, i16* [[ARRAYIDX26]], align 2 26459 // CHECK29-NEXT: [[CONV27:%.*]] = sext i16 [[TMP23]] to i32 26460 // CHECK29-NEXT: [[TMP24:%.*]] = load i32, i32* [[B]], align 4 26461 // CHECK29-NEXT: [[ADD28:%.*]] = add nsw i32 [[CONV27]], [[TMP24]] 26462 // CHECK29-NEXT: [[TMP25:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 26463 // CHECK29-NEXT: call void @llvm.stackrestore(i8* [[TMP25]]) 26464 // CHECK29-NEXT: ret i32 [[ADD28]] 26465 // 26466 // 26467 // CHECK29-LABEL: define {{[^@]+}}@_ZL7fstatici 26468 // CHECK29-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] { 26469 // CHECK29-NEXT: entry: 26470 // CHECK29-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 26471 // CHECK29-NEXT: [[A:%.*]] = alloca i32, align 4 26472 // CHECK29-NEXT: [[AA:%.*]] = alloca i16, align 2 26473 // CHECK29-NEXT: [[AAA:%.*]] = alloca i8, align 1 26474 // CHECK29-NEXT: [[B:%.*]] = alloca [10 x i32], align 4 26475 // CHECK29-NEXT: [[TMP:%.*]] = alloca i32, align 4 26476 // CHECK29-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 26477 // CHECK29-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 26478 // CHECK29-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 26479 // CHECK29-NEXT: store i32 0, i32* [[A]], align 4 26480 // CHECK29-NEXT: store i16 0, i16* [[AA]], align 2 26481 // CHECK29-NEXT: store i8 0, i8* [[AAA]], align 1 26482 // CHECK29-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 26483 // CHECK29-NEXT: store i32 429496720, i32* [[DOTOMP_UB]], align 4 26484 // CHECK29-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4 26485 // CHECK29-NEXT: ret i32 [[TMP0]] 26486 // 26487 // 26488 // CHECK29-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i 26489 // CHECK29-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] comdat { 26490 // CHECK29-NEXT: entry: 26491 // CHECK29-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 26492 // CHECK29-NEXT: [[A:%.*]] = alloca i32, align 4 26493 // CHECK29-NEXT: [[AA:%.*]] = alloca i16, align 2 26494 // CHECK29-NEXT: [[B:%.*]] = alloca [10 x i32], align 4 26495 // CHECK29-NEXT: [[TMP:%.*]] = alloca i64, align 8 26496 // CHECK29-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 26497 // CHECK29-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 26498 // CHECK29-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 26499 // CHECK29-NEXT: [[I:%.*]] = alloca i64, align 8 26500 // CHECK29-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 26501 // CHECK29-NEXT: store i32 0, i32* [[A]], align 4 26502 // CHECK29-NEXT: store i16 0, i16* [[AA]], align 2 26503 // CHECK29-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 26504 // CHECK29-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8 26505 // CHECK29-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 26506 // CHECK29-NEXT: store i64 [[TMP0]], i64* [[DOTOMP_IV]], align 8 26507 // CHECK29-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 26508 // CHECK29: omp.inner.for.cond: 26509 // 
CHECK29-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !24 26510 // CHECK29-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !24 26511 // CHECK29-NEXT: [[CMP:%.*]] = icmp sle i64 [[TMP1]], [[TMP2]] 26512 // CHECK29-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 26513 // CHECK29: omp.inner.for.body: 26514 // CHECK29-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !24 26515 // CHECK29-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP3]], 3 26516 // CHECK29-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]] 26517 // CHECK29-NEXT: store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !24 26518 // CHECK29-NEXT: [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !24 26519 // CHECK29-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 26520 // CHECK29-NEXT: store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group !24 26521 // CHECK29-NEXT: [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !24 26522 // CHECK29-NEXT: [[CONV:%.*]] = sext i16 [[TMP5]] to i32 26523 // CHECK29-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1 26524 // CHECK29-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16 26525 // CHECK29-NEXT: store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group !24 26526 // CHECK29-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i64 0, i64 2 26527 // CHECK29-NEXT: [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !24 26528 // CHECK29-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1 26529 // CHECK29-NEXT: store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !24 26530 // CHECK29-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 26531 // CHECK29: omp.body.continue: 26532 // CHECK29-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 26533 // CHECK29: omp.inner.for.inc: 26534 // CHECK29-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !24 26535 // CHECK29-NEXT: [[ADD5:%.*]] = add nsw i64 [[TMP7]], 1 26536 // CHECK29-NEXT: store i64 [[ADD5]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !24 26537 // CHECK29-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]] 26538 // CHECK29: omp.inner.for.end: 26539 // CHECK29-NEXT: store i64 11, i64* [[I]], align 8 26540 // CHECK29-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4 26541 // CHECK29-NEXT: ret i32 [[TMP8]] 26542 // 26543 // 26544 // CHECK30-LABEL: define {{[^@]+}}@_Z7get_valv 26545 // CHECK30-SAME: () #[[ATTR0:[0-9]+]] { 26546 // CHECK30-NEXT: entry: 26547 // CHECK30-NEXT: ret i64 0 26548 // 26549 // 26550 // CHECK30-LABEL: define {{[^@]+}}@_Z3fooi 26551 // CHECK30-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] { 26552 // CHECK30-NEXT: entry: 26553 // CHECK30-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 26554 // CHECK30-NEXT: [[A:%.*]] = alloca i32, align 4 26555 // CHECK30-NEXT: [[AA:%.*]] = alloca i16, align 2 26556 // CHECK30-NEXT: [[B:%.*]] = alloca [10 x float], align 4 26557 // CHECK30-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8 26558 // CHECK30-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8 26559 // CHECK30-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8 26560 // CHECK30-NEXT: [[__VLA_EXPR1:%.*]] = alloca i64, align 8 26561 // CHECK30-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8 26562 // CHECK30-NEXT: [[TMP:%.*]] = alloca i32, align 4 26563 // CHECK30-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 26564 // CHECK30-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 26565 // CHECK30-NEXT: [[DOTOMP_IV:%.*]] 
= alloca i32, align 4 26566 // CHECK30-NEXT: [[I:%.*]] = alloca i32, align 4 26567 // CHECK30-NEXT: [[K:%.*]] = alloca i64, align 8 26568 // CHECK30-NEXT: [[_TMP3:%.*]] = alloca i32, align 4 26569 // CHECK30-NEXT: [[DOTOMP_LB4:%.*]] = alloca i32, align 4 26570 // CHECK30-NEXT: [[DOTOMP_UB5:%.*]] = alloca i32, align 4 26571 // CHECK30-NEXT: [[DOTOMP_IV6:%.*]] = alloca i32, align 4 26572 // CHECK30-NEXT: [[DOTLINEAR_START:%.*]] = alloca i64, align 8 26573 // CHECK30-NEXT: [[I7:%.*]] = alloca i32, align 4 26574 // CHECK30-NEXT: [[K8:%.*]] = alloca i64, align 8 26575 // CHECK30-NEXT: [[LIN:%.*]] = alloca i32, align 4 26576 // CHECK30-NEXT: [[_TMP20:%.*]] = alloca i64, align 8 26577 // CHECK30-NEXT: [[DOTOMP_LB21:%.*]] = alloca i64, align 8 26578 // CHECK30-NEXT: [[DOTOMP_UB22:%.*]] = alloca i64, align 8 26579 // CHECK30-NEXT: [[DOTOMP_IV23:%.*]] = alloca i64, align 8 26580 // CHECK30-NEXT: [[DOTLINEAR_START24:%.*]] = alloca i32, align 4 26581 // CHECK30-NEXT: [[DOTLINEAR_START25:%.*]] = alloca i32, align 4 26582 // CHECK30-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8 26583 // CHECK30-NEXT: [[IT:%.*]] = alloca i64, align 8 26584 // CHECK30-NEXT: [[LIN27:%.*]] = alloca i32, align 4 26585 // CHECK30-NEXT: [[A28:%.*]] = alloca i32, align 4 26586 // CHECK30-NEXT: [[_TMP49:%.*]] = alloca i16, align 2 26587 // CHECK30-NEXT: [[DOTOMP_LB50:%.*]] = alloca i32, align 4 26588 // CHECK30-NEXT: [[DOTOMP_UB51:%.*]] = alloca i32, align 4 26589 // CHECK30-NEXT: [[DOTOMP_IV52:%.*]] = alloca i32, align 4 26590 // CHECK30-NEXT: [[IT53:%.*]] = alloca i16, align 2 26591 // CHECK30-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 26592 // CHECK30-NEXT: [[_TMP68:%.*]] = alloca i8, align 1 26593 // CHECK30-NEXT: [[DOTOMP_LB69:%.*]] = alloca i32, align 4 26594 // CHECK30-NEXT: [[DOTOMP_UB70:%.*]] = alloca i32, align 4 26595 // CHECK30-NEXT: [[DOTOMP_IV71:%.*]] = alloca i32, align 4 26596 // CHECK30-NEXT: [[IT72:%.*]] = alloca i8, align 1 26597 // CHECK30-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 26598 // CHECK30-NEXT: store i32 0, i32* [[A]], align 4 26599 // CHECK30-NEXT: store i16 0, i16* [[AA]], align 2 26600 // CHECK30-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 26601 // CHECK30-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 26602 // CHECK30-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave() 26603 // CHECK30-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8 26604 // CHECK30-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4 26605 // CHECK30-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8 26606 // CHECK30-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4 26607 // CHECK30-NEXT: [[TMP4:%.*]] = zext i32 [[TMP3]] to i64 26608 // CHECK30-NEXT: [[TMP5:%.*]] = mul nuw i64 5, [[TMP4]] 26609 // CHECK30-NEXT: [[VLA1:%.*]] = alloca double, i64 [[TMP5]], align 8 26610 // CHECK30-NEXT: store i64 [[TMP4]], i64* [[__VLA_EXPR1]], align 8 26611 // CHECK30-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 26612 // CHECK30-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4 26613 // CHECK30-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 26614 // CHECK30-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 26615 // CHECK30-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 26616 // CHECK30: omp.inner.for.cond: 26617 // CHECK30-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 26618 // CHECK30-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !2 26619 // CHECK30-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 26620 // 
CHECK30-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 26621 // CHECK30: omp.inner.for.body: 26622 // CHECK30-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 26623 // CHECK30-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 5 26624 // CHECK30-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]] 26625 // CHECK30-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !2 26626 // CHECK30-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 26627 // CHECK30: omp.body.continue: 26628 // CHECK30-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 26629 // CHECK30: omp.inner.for.inc: 26630 // CHECK30-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 26631 // CHECK30-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], 1 26632 // CHECK30-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 26633 // CHECK30-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] 26634 // CHECK30: omp.inner.for.end: 26635 // CHECK30-NEXT: store i32 33, i32* [[I]], align 4 26636 // CHECK30-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv() 26637 // CHECK30-NEXT: store i64 [[CALL]], i64* [[K]], align 8 26638 // CHECK30-NEXT: store i32 0, i32* [[DOTOMP_LB4]], align 4 26639 // CHECK30-NEXT: store i32 8, i32* [[DOTOMP_UB5]], align 4 26640 // CHECK30-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB4]], align 4 26641 // CHECK30-NEXT: store i32 [[TMP11]], i32* [[DOTOMP_IV6]], align 4 26642 // CHECK30-NEXT: [[TMP12:%.*]] = load i64, i64* [[K]], align 8 26643 // CHECK30-NEXT: store i64 [[TMP12]], i64* [[DOTLINEAR_START]], align 8 26644 // CHECK30-NEXT: br label [[OMP_INNER_FOR_COND9:%.*]] 26645 // CHECK30: omp.inner.for.cond9: 26646 // CHECK30-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6 26647 // CHECK30-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB5]], align 4, !llvm.access.group !6 26648 // CHECK30-NEXT: [[CMP10:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]] 26649 // CHECK30-NEXT: br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END19:%.*]] 26650 // CHECK30: omp.inner.for.body11: 26651 // CHECK30-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6 26652 // CHECK30-NEXT: [[MUL12:%.*]] = mul nsw i32 [[TMP15]], 1 26653 // CHECK30-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[MUL12]] 26654 // CHECK30-NEXT: store i32 [[SUB]], i32* [[I7]], align 4, !llvm.access.group !6 26655 // CHECK30-NEXT: [[TMP16:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !6 26656 // CHECK30-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6 26657 // CHECK30-NEXT: [[MUL13:%.*]] = mul nsw i32 [[TMP17]], 3 26658 // CHECK30-NEXT: [[CONV:%.*]] = sext i32 [[MUL13]] to i64 26659 // CHECK30-NEXT: [[ADD14:%.*]] = add nsw i64 [[TMP16]], [[CONV]] 26660 // CHECK30-NEXT: store i64 [[ADD14]], i64* [[K8]], align 8, !llvm.access.group !6 26661 // CHECK30-NEXT: [[TMP18:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !6 26662 // CHECK30-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP18]], 1 26663 // CHECK30-NEXT: store i32 [[ADD15]], i32* [[A]], align 4, !llvm.access.group !6 26664 // CHECK30-NEXT: br label [[OMP_BODY_CONTINUE16:%.*]] 26665 // CHECK30: omp.body.continue16: 26666 // CHECK30-NEXT: br label [[OMP_INNER_FOR_INC17:%.*]] 26667 // CHECK30: omp.inner.for.inc17: 26668 // CHECK30-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6 26669 // CHECK30-NEXT: [[ADD18:%.*]] = add nsw i32 [[TMP19]], 1 
26670 // CHECK30-NEXT: store i32 [[ADD18]], i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6 26671 // CHECK30-NEXT: br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP7:![0-9]+]] 26672 // CHECK30: omp.inner.for.end19: 26673 // CHECK30-NEXT: store i32 1, i32* [[I7]], align 4 26674 // CHECK30-NEXT: [[TMP20:%.*]] = load i64, i64* [[K8]], align 8 26675 // CHECK30-NEXT: store i64 [[TMP20]], i64* [[K]], align 8 26676 // CHECK30-NEXT: store i32 12, i32* [[LIN]], align 4 26677 // CHECK30-NEXT: store i64 0, i64* [[DOTOMP_LB21]], align 8 26678 // CHECK30-NEXT: store i64 3, i64* [[DOTOMP_UB22]], align 8 26679 // CHECK30-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTOMP_LB21]], align 8 26680 // CHECK30-NEXT: store i64 [[TMP21]], i64* [[DOTOMP_IV23]], align 8 26681 // CHECK30-NEXT: [[TMP22:%.*]] = load i32, i32* [[LIN]], align 4 26682 // CHECK30-NEXT: store i32 [[TMP22]], i32* [[DOTLINEAR_START24]], align 4 26683 // CHECK30-NEXT: [[TMP23:%.*]] = load i32, i32* [[A]], align 4 26684 // CHECK30-NEXT: store i32 [[TMP23]], i32* [[DOTLINEAR_START25]], align 4 26685 // CHECK30-NEXT: [[CALL26:%.*]] = call i64 @_Z7get_valv() 26686 // CHECK30-NEXT: store i64 [[CALL26]], i64* [[DOTLINEAR_STEP]], align 8 26687 // CHECK30-NEXT: br label [[OMP_INNER_FOR_COND29:%.*]] 26688 // CHECK30: omp.inner.for.cond29: 26689 // CHECK30-NEXT: [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9 26690 // CHECK30-NEXT: [[TMP25:%.*]] = load i64, i64* [[DOTOMP_UB22]], align 8, !llvm.access.group !9 26691 // CHECK30-NEXT: [[CMP30:%.*]] = icmp ule i64 [[TMP24]], [[TMP25]] 26692 // CHECK30-NEXT: br i1 [[CMP30]], label [[OMP_INNER_FOR_BODY31:%.*]], label [[OMP_INNER_FOR_END48:%.*]] 26693 // CHECK30: omp.inner.for.body31: 26694 // CHECK30-NEXT: [[TMP26:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9 26695 // CHECK30-NEXT: [[MUL32:%.*]] = mul i64 [[TMP26]], 400 26696 // CHECK30-NEXT: [[SUB33:%.*]] = sub i64 2000, [[MUL32]] 26697 // CHECK30-NEXT: store i64 [[SUB33]], i64* [[IT]], align 8, !llvm.access.group !9 26698 // CHECK30-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTLINEAR_START24]], align 4, !llvm.access.group !9 26699 // CHECK30-NEXT: [[CONV34:%.*]] = sext i32 [[TMP27]] to i64 26700 // CHECK30-NEXT: [[TMP28:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9 26701 // CHECK30-NEXT: [[TMP29:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !9 26702 // CHECK30-NEXT: [[MUL35:%.*]] = mul i64 [[TMP28]], [[TMP29]] 26703 // CHECK30-NEXT: [[ADD36:%.*]] = add i64 [[CONV34]], [[MUL35]] 26704 // CHECK30-NEXT: [[CONV37:%.*]] = trunc i64 [[ADD36]] to i32 26705 // CHECK30-NEXT: store i32 [[CONV37]], i32* [[LIN27]], align 4, !llvm.access.group !9 26706 // CHECK30-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTLINEAR_START25]], align 4, !llvm.access.group !9 26707 // CHECK30-NEXT: [[CONV38:%.*]] = sext i32 [[TMP30]] to i64 26708 // CHECK30-NEXT: [[TMP31:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9 26709 // CHECK30-NEXT: [[TMP32:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !9 26710 // CHECK30-NEXT: [[MUL39:%.*]] = mul i64 [[TMP31]], [[TMP32]] 26711 // CHECK30-NEXT: [[ADD40:%.*]] = add i64 [[CONV38]], [[MUL39]] 26712 // CHECK30-NEXT: [[CONV41:%.*]] = trunc i64 [[ADD40]] to i32 26713 // CHECK30-NEXT: store i32 [[CONV41]], i32* [[A28]], align 4, !llvm.access.group !9 26714 // CHECK30-NEXT: [[TMP33:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !9 26715 // CHECK30-NEXT: [[CONV42:%.*]] = sext i16 [[TMP33]] to i32 
26716 // CHECK30-NEXT: [[ADD43:%.*]] = add nsw i32 [[CONV42]], 1 26717 // CHECK30-NEXT: [[CONV44:%.*]] = trunc i32 [[ADD43]] to i16 26718 // CHECK30-NEXT: store i16 [[CONV44]], i16* [[AA]], align 2, !llvm.access.group !9 26719 // CHECK30-NEXT: br label [[OMP_BODY_CONTINUE45:%.*]] 26720 // CHECK30: omp.body.continue45: 26721 // CHECK30-NEXT: br label [[OMP_INNER_FOR_INC46:%.*]] 26722 // CHECK30: omp.inner.for.inc46: 26723 // CHECK30-NEXT: [[TMP34:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9 26724 // CHECK30-NEXT: [[ADD47:%.*]] = add i64 [[TMP34]], 1 26725 // CHECK30-NEXT: store i64 [[ADD47]], i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9 26726 // CHECK30-NEXT: br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP10:![0-9]+]] 26727 // CHECK30: omp.inner.for.end48: 26728 // CHECK30-NEXT: store i64 400, i64* [[IT]], align 8 26729 // CHECK30-NEXT: [[TMP35:%.*]] = load i32, i32* [[LIN27]], align 4 26730 // CHECK30-NEXT: store i32 [[TMP35]], i32* [[LIN]], align 4 26731 // CHECK30-NEXT: [[TMP36:%.*]] = load i32, i32* [[A28]], align 4 26732 // CHECK30-NEXT: store i32 [[TMP36]], i32* [[A]], align 4 26733 // CHECK30-NEXT: store i32 0, i32* [[DOTOMP_LB50]], align 4 26734 // CHECK30-NEXT: store i32 3, i32* [[DOTOMP_UB51]], align 4 26735 // CHECK30-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTOMP_LB50]], align 4 26736 // CHECK30-NEXT: store i32 [[TMP37]], i32* [[DOTOMP_IV52]], align 4 26737 // CHECK30-NEXT: br label [[OMP_INNER_FOR_COND54:%.*]] 26738 // CHECK30: omp.inner.for.cond54: 26739 // CHECK30-NEXT: [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12 26740 // CHECK30-NEXT: [[TMP39:%.*]] = load i32, i32* [[DOTOMP_UB51]], align 4, !llvm.access.group !12 26741 // CHECK30-NEXT: [[CMP55:%.*]] = icmp sle i32 [[TMP38]], [[TMP39]] 26742 // CHECK30-NEXT: br i1 [[CMP55]], label [[OMP_INNER_FOR_BODY56:%.*]], label [[OMP_INNER_FOR_END67:%.*]] 26743 // CHECK30: omp.inner.for.body56: 26744 // CHECK30-NEXT: [[TMP40:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12 26745 // CHECK30-NEXT: [[MUL57:%.*]] = mul nsw i32 [[TMP40]], 4 26746 // CHECK30-NEXT: [[ADD58:%.*]] = add nsw i32 6, [[MUL57]] 26747 // CHECK30-NEXT: [[CONV59:%.*]] = trunc i32 [[ADD58]] to i16 26748 // CHECK30-NEXT: store i16 [[CONV59]], i16* [[IT53]], align 2, !llvm.access.group !12 26749 // CHECK30-NEXT: [[TMP41:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !12 26750 // CHECK30-NEXT: [[ADD60:%.*]] = add nsw i32 [[TMP41]], 1 26751 // CHECK30-NEXT: store i32 [[ADD60]], i32* [[A]], align 4, !llvm.access.group !12 26752 // CHECK30-NEXT: [[TMP42:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !12 26753 // CHECK30-NEXT: [[CONV61:%.*]] = sext i16 [[TMP42]] to i32 26754 // CHECK30-NEXT: [[ADD62:%.*]] = add nsw i32 [[CONV61]], 1 26755 // CHECK30-NEXT: [[CONV63:%.*]] = trunc i32 [[ADD62]] to i16 26756 // CHECK30-NEXT: store i16 [[CONV63]], i16* [[AA]], align 2, !llvm.access.group !12 26757 // CHECK30-NEXT: br label [[OMP_BODY_CONTINUE64:%.*]] 26758 // CHECK30: omp.body.continue64: 26759 // CHECK30-NEXT: br label [[OMP_INNER_FOR_INC65:%.*]] 26760 // CHECK30: omp.inner.for.inc65: 26761 // CHECK30-NEXT: [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12 26762 // CHECK30-NEXT: [[ADD66:%.*]] = add nsw i32 [[TMP43]], 1 26763 // CHECK30-NEXT: store i32 [[ADD66]], i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12 26764 // CHECK30-NEXT: br label [[OMP_INNER_FOR_COND54]], !llvm.loop [[LOOP13:![0-9]+]] 26765 // CHECK30: 
omp.inner.for.end67: 26766 // CHECK30-NEXT: store i16 22, i16* [[IT53]], align 2 26767 // CHECK30-NEXT: [[TMP44:%.*]] = load i32, i32* [[A]], align 4 26768 // CHECK30-NEXT: store i32 [[TMP44]], i32* [[DOTCAPTURE_EXPR_]], align 4 26769 // CHECK30-NEXT: store i32 0, i32* [[DOTOMP_LB69]], align 4 26770 // CHECK30-NEXT: store i32 25, i32* [[DOTOMP_UB70]], align 4 26771 // CHECK30-NEXT: [[TMP45:%.*]] = load i32, i32* [[DOTOMP_LB69]], align 4 26772 // CHECK30-NEXT: store i32 [[TMP45]], i32* [[DOTOMP_IV71]], align 4 26773 // CHECK30-NEXT: br label [[OMP_INNER_FOR_COND73:%.*]] 26774 // CHECK30: omp.inner.for.cond73: 26775 // CHECK30-NEXT: [[TMP46:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15 26776 // CHECK30-NEXT: [[TMP47:%.*]] = load i32, i32* [[DOTOMP_UB70]], align 4, !llvm.access.group !15 26777 // CHECK30-NEXT: [[CMP74:%.*]] = icmp sle i32 [[TMP46]], [[TMP47]] 26778 // CHECK30-NEXT: br i1 [[CMP74]], label [[OMP_INNER_FOR_BODY75:%.*]], label [[OMP_INNER_FOR_END100:%.*]] 26779 // CHECK30: omp.inner.for.body75: 26780 // CHECK30-NEXT: [[TMP48:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15 26781 // CHECK30-NEXT: [[MUL76:%.*]] = mul nsw i32 [[TMP48]], 1 26782 // CHECK30-NEXT: [[SUB77:%.*]] = sub nsw i32 122, [[MUL76]] 26783 // CHECK30-NEXT: [[CONV78:%.*]] = trunc i32 [[SUB77]] to i8 26784 // CHECK30-NEXT: store i8 [[CONV78]], i8* [[IT72]], align 1, !llvm.access.group !15 26785 // CHECK30-NEXT: [[TMP49:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !15 26786 // CHECK30-NEXT: [[ADD79:%.*]] = add nsw i32 [[TMP49]], 1 26787 // CHECK30-NEXT: store i32 [[ADD79]], i32* [[A]], align 4, !llvm.access.group !15 26788 // CHECK30-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i64 0, i64 2 26789 // CHECK30-NEXT: [[TMP50:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !15 26790 // CHECK30-NEXT: [[CONV80:%.*]] = fpext float [[TMP50]] to double 26791 // CHECK30-NEXT: [[ADD81:%.*]] = fadd double [[CONV80]], 1.000000e+00 26792 // CHECK30-NEXT: [[CONV82:%.*]] = fptrunc double [[ADD81]] to float 26793 // CHECK30-NEXT: store float [[CONV82]], float* [[ARRAYIDX]], align 4, !llvm.access.group !15 26794 // CHECK30-NEXT: [[ARRAYIDX83:%.*]] = getelementptr inbounds float, float* [[VLA]], i64 3 26795 // CHECK30-NEXT: [[TMP51:%.*]] = load float, float* [[ARRAYIDX83]], align 4, !llvm.access.group !15 26796 // CHECK30-NEXT: [[CONV84:%.*]] = fpext float [[TMP51]] to double 26797 // CHECK30-NEXT: [[ADD85:%.*]] = fadd double [[CONV84]], 1.000000e+00 26798 // CHECK30-NEXT: [[CONV86:%.*]] = fptrunc double [[ADD85]] to float 26799 // CHECK30-NEXT: store float [[CONV86]], float* [[ARRAYIDX83]], align 4, !llvm.access.group !15 26800 // CHECK30-NEXT: [[ARRAYIDX87:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i64 0, i64 1 26801 // CHECK30-NEXT: [[ARRAYIDX88:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX87]], i64 0, i64 2 26802 // CHECK30-NEXT: [[TMP52:%.*]] = load double, double* [[ARRAYIDX88]], align 8, !llvm.access.group !15 26803 // CHECK30-NEXT: [[ADD89:%.*]] = fadd double [[TMP52]], 1.000000e+00 26804 // CHECK30-NEXT: store double [[ADD89]], double* [[ARRAYIDX88]], align 8, !llvm.access.group !15 26805 // CHECK30-NEXT: [[TMP53:%.*]] = mul nsw i64 1, [[TMP4]] 26806 // CHECK30-NEXT: [[ARRAYIDX90:%.*]] = getelementptr inbounds double, double* [[VLA1]], i64 [[TMP53]] 26807 // CHECK30-NEXT: [[ARRAYIDX91:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX90]], 
i64 3 26808 // CHECK30-NEXT: [[TMP54:%.*]] = load double, double* [[ARRAYIDX91]], align 8, !llvm.access.group !15 26809 // CHECK30-NEXT: [[ADD92:%.*]] = fadd double [[TMP54]], 1.000000e+00 26810 // CHECK30-NEXT: store double [[ADD92]], double* [[ARRAYIDX91]], align 8, !llvm.access.group !15 26811 // CHECK30-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0 26812 // CHECK30-NEXT: [[TMP55:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group !15 26813 // CHECK30-NEXT: [[ADD93:%.*]] = add nsw i64 [[TMP55]], 1 26814 // CHECK30-NEXT: store i64 [[ADD93]], i64* [[X]], align 8, !llvm.access.group !15 26815 // CHECK30-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1 26816 // CHECK30-NEXT: [[TMP56:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group !15 26817 // CHECK30-NEXT: [[CONV94:%.*]] = sext i8 [[TMP56]] to i32 26818 // CHECK30-NEXT: [[ADD95:%.*]] = add nsw i32 [[CONV94]], 1 26819 // CHECK30-NEXT: [[CONV96:%.*]] = trunc i32 [[ADD95]] to i8 26820 // CHECK30-NEXT: store i8 [[CONV96]], i8* [[Y]], align 8, !llvm.access.group !15 26821 // CHECK30-NEXT: br label [[OMP_BODY_CONTINUE97:%.*]] 26822 // CHECK30: omp.body.continue97: 26823 // CHECK30-NEXT: br label [[OMP_INNER_FOR_INC98:%.*]] 26824 // CHECK30: omp.inner.for.inc98: 26825 // CHECK30-NEXT: [[TMP57:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15 26826 // CHECK30-NEXT: [[ADD99:%.*]] = add nsw i32 [[TMP57]], 1 26827 // CHECK30-NEXT: store i32 [[ADD99]], i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15 26828 // CHECK30-NEXT: br label [[OMP_INNER_FOR_COND73]], !llvm.loop [[LOOP16:![0-9]+]] 26829 // CHECK30: omp.inner.for.end100: 26830 // CHECK30-NEXT: store i8 96, i8* [[IT72]], align 1 26831 // CHECK30-NEXT: [[TMP58:%.*]] = load i32, i32* [[A]], align 4 26832 // CHECK30-NEXT: [[TMP59:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 26833 // CHECK30-NEXT: call void @llvm.stackrestore(i8* [[TMP59]]) 26834 // CHECK30-NEXT: ret i32 [[TMP58]] 26835 // 26836 // 26837 // CHECK30-LABEL: define {{[^@]+}}@_Z3bari 26838 // CHECK30-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] { 26839 // CHECK30-NEXT: entry: 26840 // CHECK30-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 26841 // CHECK30-NEXT: [[A:%.*]] = alloca i32, align 4 26842 // CHECK30-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8 26843 // CHECK30-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 26844 // CHECK30-NEXT: store i32 0, i32* [[A]], align 4 26845 // CHECK30-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 26846 // CHECK30-NEXT: [[CALL:%.*]] = call signext i32 @_Z3fooi(i32 signext [[TMP0]]) 26847 // CHECK30-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4 26848 // CHECK30-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] 26849 // CHECK30-NEXT: store i32 [[ADD]], i32* [[A]], align 4 26850 // CHECK30-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 26851 // CHECK30-NEXT: [[CALL1:%.*]] = call signext i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 8 dereferenceable(8) [[S]], i32 signext [[TMP2]]) 26852 // CHECK30-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 26853 // CHECK30-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] 26854 // CHECK30-NEXT: store i32 [[ADD2]], i32* [[A]], align 4 26855 // CHECK30-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 26856 // CHECK30-NEXT: [[CALL3:%.*]] = call signext i32 @_ZL7fstatici(i32 signext [[TMP4]]) 26857 // CHECK30-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4 26858 // CHECK30-NEXT: [[ADD4:%.*]] = add nsw i32 
[[TMP5]], [[CALL3]] 26859 // CHECK30-NEXT: store i32 [[ADD4]], i32* [[A]], align 4 26860 // CHECK30-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4 26861 // CHECK30-NEXT: [[CALL5:%.*]] = call signext i32 @_Z9ftemplateIiET_i(i32 signext [[TMP6]]) 26862 // CHECK30-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4 26863 // CHECK30-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]] 26864 // CHECK30-NEXT: store i32 [[ADD6]], i32* [[A]], align 4 26865 // CHECK30-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4 26866 // CHECK30-NEXT: ret i32 [[TMP8]] 26867 // 26868 // 26869 // CHECK30-LABEL: define {{[^@]+}}@_ZN2S12r1Ei 26870 // CHECK30-SAME: (%struct.S1* nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 signext [[N:%.*]]) #[[ATTR0]] comdat align 2 { 26871 // CHECK30-NEXT: entry: 26872 // CHECK30-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 26873 // CHECK30-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 26874 // CHECK30-NEXT: [[B:%.*]] = alloca i32, align 4 26875 // CHECK30-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8 26876 // CHECK30-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8 26877 // CHECK30-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1 26878 // CHECK30-NEXT: [[TMP:%.*]] = alloca i64, align 8 26879 // CHECK30-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 26880 // CHECK30-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 26881 // CHECK30-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 26882 // CHECK30-NEXT: [[IT:%.*]] = alloca i64, align 8 26883 // CHECK30-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 26884 // CHECK30-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 26885 // CHECK30-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 26886 // CHECK30-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 26887 // CHECK30-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 26888 // CHECK30-NEXT: store i32 [[ADD]], i32* [[B]], align 4 26889 // CHECK30-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 26890 // CHECK30-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 26891 // CHECK30-NEXT: [[TMP3:%.*]] = call i8* @llvm.stacksave() 26892 // CHECK30-NEXT: store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8 26893 // CHECK30-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]] 26894 // CHECK30-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2 26895 // CHECK30-NEXT: store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8 26896 // CHECK30-NEXT: [[TMP5:%.*]] = load i32, i32* [[N_ADDR]], align 4 26897 // CHECK30-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 60 26898 // CHECK30-NEXT: [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8 26899 // CHECK30-NEXT: store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_]], align 1 26900 // CHECK30-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 26901 // CHECK30-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8 26902 // CHECK30-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 26903 // CHECK30-NEXT: store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8 26904 // CHECK30-NEXT: [[TMP7:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1 26905 // CHECK30-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP7]] to i1 26906 // CHECK30-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 26907 // CHECK30: omp_if.then: 26908 // CHECK30-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 26909 // CHECK30: omp.inner.for.cond: 26910 // CHECK30-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18 26911 // CHECK30-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !18 26912 // CHECK30-NEXT: 
[[CMP2:%.*]] = icmp ule i64 [[TMP8]], [[TMP9]] 26913 // CHECK30-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 26914 // CHECK30: omp.inner.for.body: 26915 // CHECK30-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18 26916 // CHECK30-NEXT: [[MUL:%.*]] = mul i64 [[TMP10]], 400 26917 // CHECK30-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] 26918 // CHECK30-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !18 26919 // CHECK30-NEXT: [[TMP11:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group !18 26920 // CHECK30-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP11]] to double 26921 // CHECK30-NEXT: [[ADD3:%.*]] = fadd double [[CONV]], 1.500000e+00 26922 // CHECK30-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0 26923 // CHECK30-NEXT: store double [[ADD3]], double* [[A]], align 8, !nontemporal !19, !llvm.access.group !18 26924 // CHECK30-NEXT: [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 26925 // CHECK30-NEXT: [[TMP12:%.*]] = load double, double* [[A4]], align 8, !nontemporal !19, !llvm.access.group !18 26926 // CHECK30-NEXT: [[INC:%.*]] = fadd double [[TMP12]], 1.000000e+00 26927 // CHECK30-NEXT: store double [[INC]], double* [[A4]], align 8, !nontemporal !19, !llvm.access.group !18 26928 // CHECK30-NEXT: [[CONV5:%.*]] = fptosi double [[INC]] to i16 26929 // CHECK30-NEXT: [[TMP13:%.*]] = mul nsw i64 1, [[TMP2]] 26930 // CHECK30-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP13]] 26931 // CHECK30-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1 26932 // CHECK30-NEXT: store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2, !llvm.access.group !18 26933 // CHECK30-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 26934 // CHECK30: omp.body.continue: 26935 // CHECK30-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 26936 // CHECK30: omp.inner.for.inc: 26937 // CHECK30-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18 26938 // CHECK30-NEXT: [[ADD7:%.*]] = add i64 [[TMP14]], 1 26939 // CHECK30-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18 26940 // CHECK30-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] 26941 // CHECK30: omp.inner.for.end: 26942 // CHECK30-NEXT: br label [[OMP_IF_END:%.*]] 26943 // CHECK30: omp_if.else: 26944 // CHECK30-NEXT: br label [[OMP_INNER_FOR_COND8:%.*]] 26945 // CHECK30: omp.inner.for.cond8: 26946 // CHECK30-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 26947 // CHECK30-NEXT: [[TMP16:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 26948 // CHECK30-NEXT: [[CMP9:%.*]] = icmp ule i64 [[TMP15]], [[TMP16]] 26949 // CHECK30-NEXT: br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY10:%.*]], label [[OMP_INNER_FOR_END24:%.*]] 26950 // CHECK30: omp.inner.for.body10: 26951 // CHECK30-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 26952 // CHECK30-NEXT: [[MUL11:%.*]] = mul i64 [[TMP17]], 400 26953 // CHECK30-NEXT: [[SUB12:%.*]] = sub i64 2000, [[MUL11]] 26954 // CHECK30-NEXT: store i64 [[SUB12]], i64* [[IT]], align 8 26955 // CHECK30-NEXT: [[TMP18:%.*]] = load i32, i32* [[B]], align 4 26956 // CHECK30-NEXT: [[CONV13:%.*]] = sitofp i32 [[TMP18]] to double 26957 // CHECK30-NEXT: [[ADD14:%.*]] = fadd double [[CONV13]], 1.500000e+00 26958 // CHECK30-NEXT: [[A15:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 26959 // CHECK30-NEXT: store double [[ADD14]], 
double* [[A15]], align 8 26960 // CHECK30-NEXT: [[A16:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 26961 // CHECK30-NEXT: [[TMP19:%.*]] = load double, double* [[A16]], align 8 26962 // CHECK30-NEXT: [[INC17:%.*]] = fadd double [[TMP19]], 1.000000e+00 26963 // CHECK30-NEXT: store double [[INC17]], double* [[A16]], align 8 26964 // CHECK30-NEXT: [[CONV18:%.*]] = fptosi double [[INC17]] to i16 26965 // CHECK30-NEXT: [[TMP20:%.*]] = mul nsw i64 1, [[TMP2]] 26966 // CHECK30-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP20]] 26967 // CHECK30-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX19]], i64 1 26968 // CHECK30-NEXT: store i16 [[CONV18]], i16* [[ARRAYIDX20]], align 2 26969 // CHECK30-NEXT: br label [[OMP_BODY_CONTINUE21:%.*]] 26970 // CHECK30: omp.body.continue21: 26971 // CHECK30-NEXT: br label [[OMP_INNER_FOR_INC22:%.*]] 26972 // CHECK30: omp.inner.for.inc22: 26973 // CHECK30-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 26974 // CHECK30-NEXT: [[ADD23:%.*]] = add i64 [[TMP21]], 1 26975 // CHECK30-NEXT: store i64 [[ADD23]], i64* [[DOTOMP_IV]], align 8 26976 // CHECK30-NEXT: br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP22:![0-9]+]] 26977 // CHECK30: omp.inner.for.end24: 26978 // CHECK30-NEXT: br label [[OMP_IF_END]] 26979 // CHECK30: omp_if.end: 26980 // CHECK30-NEXT: store i64 400, i64* [[IT]], align 8 26981 // CHECK30-NEXT: [[TMP22:%.*]] = mul nsw i64 1, [[TMP2]] 26982 // CHECK30-NEXT: [[ARRAYIDX25:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP22]] 26983 // CHECK30-NEXT: [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX25]], i64 1 26984 // CHECK30-NEXT: [[TMP23:%.*]] = load i16, i16* [[ARRAYIDX26]], align 2 26985 // CHECK30-NEXT: [[CONV27:%.*]] = sext i16 [[TMP23]] to i32 26986 // CHECK30-NEXT: [[TMP24:%.*]] = load i32, i32* [[B]], align 4 26987 // CHECK30-NEXT: [[ADD28:%.*]] = add nsw i32 [[CONV27]], [[TMP24]] 26988 // CHECK30-NEXT: [[TMP25:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 26989 // CHECK30-NEXT: call void @llvm.stackrestore(i8* [[TMP25]]) 26990 // CHECK30-NEXT: ret i32 [[ADD28]] 26991 // 26992 // 26993 // CHECK30-LABEL: define {{[^@]+}}@_ZL7fstatici 26994 // CHECK30-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] { 26995 // CHECK30-NEXT: entry: 26996 // CHECK30-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 26997 // CHECK30-NEXT: [[A:%.*]] = alloca i32, align 4 26998 // CHECK30-NEXT: [[AA:%.*]] = alloca i16, align 2 26999 // CHECK30-NEXT: [[AAA:%.*]] = alloca i8, align 1 27000 // CHECK30-NEXT: [[B:%.*]] = alloca [10 x i32], align 4 27001 // CHECK30-NEXT: [[TMP:%.*]] = alloca i32, align 4 27002 // CHECK30-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 27003 // CHECK30-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 27004 // CHECK30-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 27005 // CHECK30-NEXT: store i32 0, i32* [[A]], align 4 27006 // CHECK30-NEXT: store i16 0, i16* [[AA]], align 2 27007 // CHECK30-NEXT: store i8 0, i8* [[AAA]], align 1 27008 // CHECK30-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 27009 // CHECK30-NEXT: store i32 429496720, i32* [[DOTOMP_UB]], align 4 27010 // CHECK30-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4 27011 // CHECK30-NEXT: ret i32 [[TMP0]] 27012 // 27013 // 27014 // CHECK30-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i 27015 // CHECK30-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] comdat { 27016 // CHECK30-NEXT: entry: 27017 // CHECK30-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 27018 // CHECK30-NEXT: [[A:%.*]] = 
alloca i32, align 4 27019 // CHECK30-NEXT: [[AA:%.*]] = alloca i16, align 2 27020 // CHECK30-NEXT: [[B:%.*]] = alloca [10 x i32], align 4 27021 // CHECK30-NEXT: [[TMP:%.*]] = alloca i64, align 8 27022 // CHECK30-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 27023 // CHECK30-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 27024 // CHECK30-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 27025 // CHECK30-NEXT: [[I:%.*]] = alloca i64, align 8 27026 // CHECK30-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 27027 // CHECK30-NEXT: store i32 0, i32* [[A]], align 4 27028 // CHECK30-NEXT: store i16 0, i16* [[AA]], align 2 27029 // CHECK30-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 27030 // CHECK30-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8 27031 // CHECK30-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 27032 // CHECK30-NEXT: store i64 [[TMP0]], i64* [[DOTOMP_IV]], align 8 27033 // CHECK30-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 27034 // CHECK30: omp.inner.for.cond: 27035 // CHECK30-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !24 27036 // CHECK30-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !24 27037 // CHECK30-NEXT: [[CMP:%.*]] = icmp sle i64 [[TMP1]], [[TMP2]] 27038 // CHECK30-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 27039 // CHECK30: omp.inner.for.body: 27040 // CHECK30-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !24 27041 // CHECK30-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP3]], 3 27042 // CHECK30-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]] 27043 // CHECK30-NEXT: store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !24 27044 // CHECK30-NEXT: [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !24 27045 // CHECK30-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 27046 // CHECK30-NEXT: store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group !24 27047 // CHECK30-NEXT: [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !24 27048 // CHECK30-NEXT: [[CONV:%.*]] = sext i16 [[TMP5]] to i32 27049 // CHECK30-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1 27050 // CHECK30-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16 27051 // CHECK30-NEXT: store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group !24 27052 // CHECK30-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i64 0, i64 2 27053 // CHECK30-NEXT: [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !24 27054 // CHECK30-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1 27055 // CHECK30-NEXT: store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !24 27056 // CHECK30-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 27057 // CHECK30: omp.body.continue: 27058 // CHECK30-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 27059 // CHECK30: omp.inner.for.inc: 27060 // CHECK30-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !24 27061 // CHECK30-NEXT: [[ADD5:%.*]] = add nsw i64 [[TMP7]], 1 27062 // CHECK30-NEXT: store i64 [[ADD5]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !24 27063 // CHECK30-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]] 27064 // CHECK30: omp.inner.for.end: 27065 // CHECK30-NEXT: store i64 11, i64* [[I]], align 8 27066 // CHECK30-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4 27067 // CHECK30-NEXT: ret i32 [[TMP8]] 27068 // 27069 // 27070 // CHECK31-LABEL: define {{[^@]+}}@_Z7get_valv 27071 // CHECK31-SAME: () #[[ATTR0:[0-9]+]] { 27072 // 
CHECK31-NEXT: entry: 27073 // CHECK31-NEXT: ret i64 0 27074 // 27075 // 27076 // CHECK31-LABEL: define {{[^@]+}}@_Z3fooi 27077 // CHECK31-SAME: (i32 [[N:%.*]]) #[[ATTR0]] { 27078 // CHECK31-NEXT: entry: 27079 // CHECK31-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 27080 // CHECK31-NEXT: [[A:%.*]] = alloca i32, align 4 27081 // CHECK31-NEXT: [[AA:%.*]] = alloca i16, align 2 27082 // CHECK31-NEXT: [[B:%.*]] = alloca [10 x float], align 4 27083 // CHECK31-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4 27084 // CHECK31-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4 27085 // CHECK31-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8 27086 // CHECK31-NEXT: [[__VLA_EXPR1:%.*]] = alloca i32, align 4 27087 // CHECK31-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4 27088 // CHECK31-NEXT: [[TMP:%.*]] = alloca i32, align 4 27089 // CHECK31-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 27090 // CHECK31-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 27091 // CHECK31-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 27092 // CHECK31-NEXT: [[I:%.*]] = alloca i32, align 4 27093 // CHECK31-NEXT: [[K:%.*]] = alloca i64, align 8 27094 // CHECK31-NEXT: [[_TMP3:%.*]] = alloca i32, align 4 27095 // CHECK31-NEXT: [[DOTOMP_LB4:%.*]] = alloca i32, align 4 27096 // CHECK31-NEXT: [[DOTOMP_UB5:%.*]] = alloca i32, align 4 27097 // CHECK31-NEXT: [[DOTOMP_IV6:%.*]] = alloca i32, align 4 27098 // CHECK31-NEXT: [[DOTLINEAR_START:%.*]] = alloca i64, align 8 27099 // CHECK31-NEXT: [[I7:%.*]] = alloca i32, align 4 27100 // CHECK31-NEXT: [[K8:%.*]] = alloca i64, align 8 27101 // CHECK31-NEXT: [[LIN:%.*]] = alloca i32, align 4 27102 // CHECK31-NEXT: [[_TMP20:%.*]] = alloca i64, align 4 27103 // CHECK31-NEXT: [[DOTOMP_LB21:%.*]] = alloca i64, align 8 27104 // CHECK31-NEXT: [[DOTOMP_UB22:%.*]] = alloca i64, align 8 27105 // CHECK31-NEXT: [[DOTOMP_IV23:%.*]] = alloca i64, align 8 27106 // CHECK31-NEXT: [[DOTLINEAR_START24:%.*]] = alloca i32, align 4 27107 // CHECK31-NEXT: [[DOTLINEAR_START25:%.*]] = alloca i32, align 4 27108 // CHECK31-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8 27109 // CHECK31-NEXT: [[IT:%.*]] = alloca i64, align 8 27110 // CHECK31-NEXT: [[LIN27:%.*]] = alloca i32, align 4 27111 // CHECK31-NEXT: [[A28:%.*]] = alloca i32, align 4 27112 // CHECK31-NEXT: [[_TMP49:%.*]] = alloca i16, align 2 27113 // CHECK31-NEXT: [[DOTOMP_LB50:%.*]] = alloca i32, align 4 27114 // CHECK31-NEXT: [[DOTOMP_UB51:%.*]] = alloca i32, align 4 27115 // CHECK31-NEXT: [[DOTOMP_IV52:%.*]] = alloca i32, align 4 27116 // CHECK31-NEXT: [[IT53:%.*]] = alloca i16, align 2 27117 // CHECK31-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 27118 // CHECK31-NEXT: [[_TMP68:%.*]] = alloca i8, align 1 27119 // CHECK31-NEXT: [[DOTOMP_LB69:%.*]] = alloca i32, align 4 27120 // CHECK31-NEXT: [[DOTOMP_UB70:%.*]] = alloca i32, align 4 27121 // CHECK31-NEXT: [[DOTOMP_IV71:%.*]] = alloca i32, align 4 27122 // CHECK31-NEXT: [[IT72:%.*]] = alloca i8, align 1 27123 // CHECK31-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 27124 // CHECK31-NEXT: store i32 0, i32* [[A]], align 4 27125 // CHECK31-NEXT: store i16 0, i16* [[AA]], align 2 27126 // CHECK31-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 27127 // CHECK31-NEXT: [[TMP1:%.*]] = call i8* @llvm.stacksave() 27128 // CHECK31-NEXT: store i8* [[TMP1]], i8** [[SAVED_STACK]], align 4 27129 // CHECK31-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP0]], align 4 27130 // CHECK31-NEXT: store i32 [[TMP0]], i32* [[__VLA_EXPR0]], align 4 27131 // CHECK31-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 
4 27132 // CHECK31-NEXT: [[TMP3:%.*]] = mul nuw i32 5, [[TMP2]] 27133 // CHECK31-NEXT: [[VLA1:%.*]] = alloca double, i32 [[TMP3]], align 8 27134 // CHECK31-NEXT: store i32 [[TMP2]], i32* [[__VLA_EXPR1]], align 4 27135 // CHECK31-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 27136 // CHECK31-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4 27137 // CHECK31-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 27138 // CHECK31-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 27139 // CHECK31-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 27140 // CHECK31: omp.inner.for.cond: 27141 // CHECK31-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3 27142 // CHECK31-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !3 27143 // CHECK31-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 27144 // CHECK31-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 27145 // CHECK31: omp.inner.for.body: 27146 // CHECK31-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3 27147 // CHECK31-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5 27148 // CHECK31-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]] 27149 // CHECK31-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !3 27150 // CHECK31-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 27151 // CHECK31: omp.body.continue: 27152 // CHECK31-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 27153 // CHECK31: omp.inner.for.inc: 27154 // CHECK31-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3 27155 // CHECK31-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 27156 // CHECK31-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3 27157 // CHECK31-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] 27158 // CHECK31: omp.inner.for.end: 27159 // CHECK31-NEXT: store i32 33, i32* [[I]], align 4 27160 // CHECK31-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv() 27161 // CHECK31-NEXT: store i64 [[CALL]], i64* [[K]], align 8 27162 // CHECK31-NEXT: store i32 0, i32* [[DOTOMP_LB4]], align 4 27163 // CHECK31-NEXT: store i32 8, i32* [[DOTOMP_UB5]], align 4 27164 // CHECK31-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB4]], align 4 27165 // CHECK31-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_IV6]], align 4 27166 // CHECK31-NEXT: [[TMP10:%.*]] = load i64, i64* [[K]], align 8 27167 // CHECK31-NEXT: store i64 [[TMP10]], i64* [[DOTLINEAR_START]], align 8 27168 // CHECK31-NEXT: br label [[OMP_INNER_FOR_COND9:%.*]] 27169 // CHECK31: omp.inner.for.cond9: 27170 // CHECK31-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7 27171 // CHECK31-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB5]], align 4, !llvm.access.group !7 27172 // CHECK31-NEXT: [[CMP10:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]] 27173 // CHECK31-NEXT: br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END19:%.*]] 27174 // CHECK31: omp.inner.for.body11: 27175 // CHECK31-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7 27176 // CHECK31-NEXT: [[MUL12:%.*]] = mul nsw i32 [[TMP13]], 1 27177 // CHECK31-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[MUL12]] 27178 // CHECK31-NEXT: store i32 [[SUB]], i32* [[I7]], align 4, !llvm.access.group !7 27179 // CHECK31-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !7 27180 // CHECK31-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7 27181 // CHECK31-NEXT: [[MUL13:%.*]] 
= mul nsw i32 [[TMP15]], 3 27182 // CHECK31-NEXT: [[CONV:%.*]] = sext i32 [[MUL13]] to i64 27183 // CHECK31-NEXT: [[ADD14:%.*]] = add nsw i64 [[TMP14]], [[CONV]] 27184 // CHECK31-NEXT: store i64 [[ADD14]], i64* [[K8]], align 8, !llvm.access.group !7 27185 // CHECK31-NEXT: [[TMP16:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !7 27186 // CHECK31-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP16]], 1 27187 // CHECK31-NEXT: store i32 [[ADD15]], i32* [[A]], align 4, !llvm.access.group !7 27188 // CHECK31-NEXT: br label [[OMP_BODY_CONTINUE16:%.*]] 27189 // CHECK31: omp.body.continue16: 27190 // CHECK31-NEXT: br label [[OMP_INNER_FOR_INC17:%.*]] 27191 // CHECK31: omp.inner.for.inc17: 27192 // CHECK31-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7 27193 // CHECK31-NEXT: [[ADD18:%.*]] = add nsw i32 [[TMP17]], 1 27194 // CHECK31-NEXT: store i32 [[ADD18]], i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7 27195 // CHECK31-NEXT: br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP8:![0-9]+]] 27196 // CHECK31: omp.inner.for.end19: 27197 // CHECK31-NEXT: store i32 1, i32* [[I7]], align 4 27198 // CHECK31-NEXT: [[TMP18:%.*]] = load i64, i64* [[K8]], align 8 27199 // CHECK31-NEXT: store i64 [[TMP18]], i64* [[K]], align 8 27200 // CHECK31-NEXT: store i32 12, i32* [[LIN]], align 4 27201 // CHECK31-NEXT: store i64 0, i64* [[DOTOMP_LB21]], align 8 27202 // CHECK31-NEXT: store i64 3, i64* [[DOTOMP_UB22]], align 8 27203 // CHECK31-NEXT: [[TMP19:%.*]] = load i64, i64* [[DOTOMP_LB21]], align 8 27204 // CHECK31-NEXT: store i64 [[TMP19]], i64* [[DOTOMP_IV23]], align 8 27205 // CHECK31-NEXT: [[TMP20:%.*]] = load i32, i32* [[LIN]], align 4 27206 // CHECK31-NEXT: store i32 [[TMP20]], i32* [[DOTLINEAR_START24]], align 4 27207 // CHECK31-NEXT: [[TMP21:%.*]] = load i32, i32* [[A]], align 4 27208 // CHECK31-NEXT: store i32 [[TMP21]], i32* [[DOTLINEAR_START25]], align 4 27209 // CHECK31-NEXT: [[CALL26:%.*]] = call i64 @_Z7get_valv() 27210 // CHECK31-NEXT: store i64 [[CALL26]], i64* [[DOTLINEAR_STEP]], align 8 27211 // CHECK31-NEXT: br label [[OMP_INNER_FOR_COND29:%.*]] 27212 // CHECK31: omp.inner.for.cond29: 27213 // CHECK31-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10 27214 // CHECK31-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTOMP_UB22]], align 8, !llvm.access.group !10 27215 // CHECK31-NEXT: [[CMP30:%.*]] = icmp ule i64 [[TMP22]], [[TMP23]] 27216 // CHECK31-NEXT: br i1 [[CMP30]], label [[OMP_INNER_FOR_BODY31:%.*]], label [[OMP_INNER_FOR_END48:%.*]] 27217 // CHECK31: omp.inner.for.body31: 27218 // CHECK31-NEXT: [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10 27219 // CHECK31-NEXT: [[MUL32:%.*]] = mul i64 [[TMP24]], 400 27220 // CHECK31-NEXT: [[SUB33:%.*]] = sub i64 2000, [[MUL32]] 27221 // CHECK31-NEXT: store i64 [[SUB33]], i64* [[IT]], align 8, !llvm.access.group !10 27222 // CHECK31-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTLINEAR_START24]], align 4, !llvm.access.group !10 27223 // CHECK31-NEXT: [[CONV34:%.*]] = sext i32 [[TMP25]] to i64 27224 // CHECK31-NEXT: [[TMP26:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10 27225 // CHECK31-NEXT: [[TMP27:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !10 27226 // CHECK31-NEXT: [[MUL35:%.*]] = mul i64 [[TMP26]], [[TMP27]] 27227 // CHECK31-NEXT: [[ADD36:%.*]] = add i64 [[CONV34]], [[MUL35]] 27228 // CHECK31-NEXT: [[CONV37:%.*]] = trunc i64 [[ADD36]] to i32 27229 // CHECK31-NEXT: store i32 [[CONV37]], i32* [[LIN27]], 
align 4, !llvm.access.group !10 27230 // CHECK31-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTLINEAR_START25]], align 4, !llvm.access.group !10 27231 // CHECK31-NEXT: [[CONV38:%.*]] = sext i32 [[TMP28]] to i64 27232 // CHECK31-NEXT: [[TMP29:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10 27233 // CHECK31-NEXT: [[TMP30:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !10 27234 // CHECK31-NEXT: [[MUL39:%.*]] = mul i64 [[TMP29]], [[TMP30]] 27235 // CHECK31-NEXT: [[ADD40:%.*]] = add i64 [[CONV38]], [[MUL39]] 27236 // CHECK31-NEXT: [[CONV41:%.*]] = trunc i64 [[ADD40]] to i32 27237 // CHECK31-NEXT: store i32 [[CONV41]], i32* [[A28]], align 4, !llvm.access.group !10 27238 // CHECK31-NEXT: [[TMP31:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !10 27239 // CHECK31-NEXT: [[CONV42:%.*]] = sext i16 [[TMP31]] to i32 27240 // CHECK31-NEXT: [[ADD43:%.*]] = add nsw i32 [[CONV42]], 1 27241 // CHECK31-NEXT: [[CONV44:%.*]] = trunc i32 [[ADD43]] to i16 27242 // CHECK31-NEXT: store i16 [[CONV44]], i16* [[AA]], align 2, !llvm.access.group !10 27243 // CHECK31-NEXT: br label [[OMP_BODY_CONTINUE45:%.*]] 27244 // CHECK31: omp.body.continue45: 27245 // CHECK31-NEXT: br label [[OMP_INNER_FOR_INC46:%.*]] 27246 // CHECK31: omp.inner.for.inc46: 27247 // CHECK31-NEXT: [[TMP32:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10 27248 // CHECK31-NEXT: [[ADD47:%.*]] = add i64 [[TMP32]], 1 27249 // CHECK31-NEXT: store i64 [[ADD47]], i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10 27250 // CHECK31-NEXT: br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP11:![0-9]+]] 27251 // CHECK31: omp.inner.for.end48: 27252 // CHECK31-NEXT: store i64 400, i64* [[IT]], align 8 27253 // CHECK31-NEXT: [[TMP33:%.*]] = load i32, i32* [[LIN27]], align 4 27254 // CHECK31-NEXT: store i32 [[TMP33]], i32* [[LIN]], align 4 27255 // CHECK31-NEXT: [[TMP34:%.*]] = load i32, i32* [[A28]], align 4 27256 // CHECK31-NEXT: store i32 [[TMP34]], i32* [[A]], align 4 27257 // CHECK31-NEXT: store i32 0, i32* [[DOTOMP_LB50]], align 4 27258 // CHECK31-NEXT: store i32 3, i32* [[DOTOMP_UB51]], align 4 27259 // CHECK31-NEXT: [[TMP35:%.*]] = load i32, i32* [[DOTOMP_LB50]], align 4 27260 // CHECK31-NEXT: store i32 [[TMP35]], i32* [[DOTOMP_IV52]], align 4 27261 // CHECK31-NEXT: br label [[OMP_INNER_FOR_COND54:%.*]] 27262 // CHECK31: omp.inner.for.cond54: 27263 // CHECK31-NEXT: [[TMP36:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13 27264 // CHECK31-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTOMP_UB51]], align 4, !llvm.access.group !13 27265 // CHECK31-NEXT: [[CMP55:%.*]] = icmp sle i32 [[TMP36]], [[TMP37]] 27266 // CHECK31-NEXT: br i1 [[CMP55]], label [[OMP_INNER_FOR_BODY56:%.*]], label [[OMP_INNER_FOR_END67:%.*]] 27267 // CHECK31: omp.inner.for.body56: 27268 // CHECK31-NEXT: [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13 27269 // CHECK31-NEXT: [[MUL57:%.*]] = mul nsw i32 [[TMP38]], 4 27270 // CHECK31-NEXT: [[ADD58:%.*]] = add nsw i32 6, [[MUL57]] 27271 // CHECK31-NEXT: [[CONV59:%.*]] = trunc i32 [[ADD58]] to i16 27272 // CHECK31-NEXT: store i16 [[CONV59]], i16* [[IT53]], align 2, !llvm.access.group !13 27273 // CHECK31-NEXT: [[TMP39:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !13 27274 // CHECK31-NEXT: [[ADD60:%.*]] = add nsw i32 [[TMP39]], 1 27275 // CHECK31-NEXT: store i32 [[ADD60]], i32* [[A]], align 4, !llvm.access.group !13 27276 // CHECK31-NEXT: [[TMP40:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group 
!13 27277 // CHECK31-NEXT: [[CONV61:%.*]] = sext i16 [[TMP40]] to i32 27278 // CHECK31-NEXT: [[ADD62:%.*]] = add nsw i32 [[CONV61]], 1 27279 // CHECK31-NEXT: [[CONV63:%.*]] = trunc i32 [[ADD62]] to i16 27280 // CHECK31-NEXT: store i16 [[CONV63]], i16* [[AA]], align 2, !llvm.access.group !13 27281 // CHECK31-NEXT: br label [[OMP_BODY_CONTINUE64:%.*]] 27282 // CHECK31: omp.body.continue64: 27283 // CHECK31-NEXT: br label [[OMP_INNER_FOR_INC65:%.*]] 27284 // CHECK31: omp.inner.for.inc65: 27285 // CHECK31-NEXT: [[TMP41:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13 27286 // CHECK31-NEXT: [[ADD66:%.*]] = add nsw i32 [[TMP41]], 1 27287 // CHECK31-NEXT: store i32 [[ADD66]], i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13 27288 // CHECK31-NEXT: br label [[OMP_INNER_FOR_COND54]], !llvm.loop [[LOOP14:![0-9]+]] 27289 // CHECK31: omp.inner.for.end67: 27290 // CHECK31-NEXT: store i16 22, i16* [[IT53]], align 2 27291 // CHECK31-NEXT: [[TMP42:%.*]] = load i32, i32* [[A]], align 4 27292 // CHECK31-NEXT: store i32 [[TMP42]], i32* [[DOTCAPTURE_EXPR_]], align 4 27293 // CHECK31-NEXT: store i32 0, i32* [[DOTOMP_LB69]], align 4 27294 // CHECK31-NEXT: store i32 25, i32* [[DOTOMP_UB70]], align 4 27295 // CHECK31-NEXT: [[TMP43:%.*]] = load i32, i32* [[DOTOMP_LB69]], align 4 27296 // CHECK31-NEXT: store i32 [[TMP43]], i32* [[DOTOMP_IV71]], align 4 27297 // CHECK31-NEXT: br label [[OMP_INNER_FOR_COND73:%.*]] 27298 // CHECK31: omp.inner.for.cond73: 27299 // CHECK31-NEXT: [[TMP44:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16 27300 // CHECK31-NEXT: [[TMP45:%.*]] = load i32, i32* [[DOTOMP_UB70]], align 4, !llvm.access.group !16 27301 // CHECK31-NEXT: [[CMP74:%.*]] = icmp sle i32 [[TMP44]], [[TMP45]] 27302 // CHECK31-NEXT: br i1 [[CMP74]], label [[OMP_INNER_FOR_BODY75:%.*]], label [[OMP_INNER_FOR_END100:%.*]] 27303 // CHECK31: omp.inner.for.body75: 27304 // CHECK31-NEXT: [[TMP46:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16 27305 // CHECK31-NEXT: [[MUL76:%.*]] = mul nsw i32 [[TMP46]], 1 27306 // CHECK31-NEXT: [[SUB77:%.*]] = sub nsw i32 122, [[MUL76]] 27307 // CHECK31-NEXT: [[CONV78:%.*]] = trunc i32 [[SUB77]] to i8 27308 // CHECK31-NEXT: store i8 [[CONV78]], i8* [[IT72]], align 1, !llvm.access.group !16 27309 // CHECK31-NEXT: [[TMP47:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !16 27310 // CHECK31-NEXT: [[ADD79:%.*]] = add nsw i32 [[TMP47]], 1 27311 // CHECK31-NEXT: store i32 [[ADD79]], i32* [[A]], align 4, !llvm.access.group !16 27312 // CHECK31-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i32 0, i32 2 27313 // CHECK31-NEXT: [[TMP48:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !16 27314 // CHECK31-NEXT: [[CONV80:%.*]] = fpext float [[TMP48]] to double 27315 // CHECK31-NEXT: [[ADD81:%.*]] = fadd double [[CONV80]], 1.000000e+00 27316 // CHECK31-NEXT: [[CONV82:%.*]] = fptrunc double [[ADD81]] to float 27317 // CHECK31-NEXT: store float [[CONV82]], float* [[ARRAYIDX]], align 4, !llvm.access.group !16 27318 // CHECK31-NEXT: [[ARRAYIDX83:%.*]] = getelementptr inbounds float, float* [[VLA]], i32 3 27319 // CHECK31-NEXT: [[TMP49:%.*]] = load float, float* [[ARRAYIDX83]], align 4, !llvm.access.group !16 27320 // CHECK31-NEXT: [[CONV84:%.*]] = fpext float [[TMP49]] to double 27321 // CHECK31-NEXT: [[ADD85:%.*]] = fadd double [[CONV84]], 1.000000e+00 27322 // CHECK31-NEXT: [[CONV86:%.*]] = fptrunc double [[ADD85]] to float 27323 // CHECK31-NEXT: store float [[CONV86]], 
float* [[ARRAYIDX83]], align 4, !llvm.access.group !16 27324 // CHECK31-NEXT: [[ARRAYIDX87:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i32 0, i32 1 27325 // CHECK31-NEXT: [[ARRAYIDX88:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX87]], i32 0, i32 2 27326 // CHECK31-NEXT: [[TMP50:%.*]] = load double, double* [[ARRAYIDX88]], align 8, !llvm.access.group !16 27327 // CHECK31-NEXT: [[ADD89:%.*]] = fadd double [[TMP50]], 1.000000e+00 27328 // CHECK31-NEXT: store double [[ADD89]], double* [[ARRAYIDX88]], align 8, !llvm.access.group !16 27329 // CHECK31-NEXT: [[TMP51:%.*]] = mul nsw i32 1, [[TMP2]] 27330 // CHECK31-NEXT: [[ARRAYIDX90:%.*]] = getelementptr inbounds double, double* [[VLA1]], i32 [[TMP51]] 27331 // CHECK31-NEXT: [[ARRAYIDX91:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX90]], i32 3 27332 // CHECK31-NEXT: [[TMP52:%.*]] = load double, double* [[ARRAYIDX91]], align 8, !llvm.access.group !16 27333 // CHECK31-NEXT: [[ADD92:%.*]] = fadd double [[TMP52]], 1.000000e+00 27334 // CHECK31-NEXT: store double [[ADD92]], double* [[ARRAYIDX91]], align 8, !llvm.access.group !16 27335 // CHECK31-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0 27336 // CHECK31-NEXT: [[TMP53:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group !16 27337 // CHECK31-NEXT: [[ADD93:%.*]] = add nsw i64 [[TMP53]], 1 27338 // CHECK31-NEXT: store i64 [[ADD93]], i64* [[X]], align 4, !llvm.access.group !16 27339 // CHECK31-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1 27340 // CHECK31-NEXT: [[TMP54:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group !16 27341 // CHECK31-NEXT: [[CONV94:%.*]] = sext i8 [[TMP54]] to i32 27342 // CHECK31-NEXT: [[ADD95:%.*]] = add nsw i32 [[CONV94]], 1 27343 // CHECK31-NEXT: [[CONV96:%.*]] = trunc i32 [[ADD95]] to i8 27344 // CHECK31-NEXT: store i8 [[CONV96]], i8* [[Y]], align 4, !llvm.access.group !16 27345 // CHECK31-NEXT: br label [[OMP_BODY_CONTINUE97:%.*]] 27346 // CHECK31: omp.body.continue97: 27347 // CHECK31-NEXT: br label [[OMP_INNER_FOR_INC98:%.*]] 27348 // CHECK31: omp.inner.for.inc98: 27349 // CHECK31-NEXT: [[TMP55:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16 27350 // CHECK31-NEXT: [[ADD99:%.*]] = add nsw i32 [[TMP55]], 1 27351 // CHECK31-NEXT: store i32 [[ADD99]], i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16 27352 // CHECK31-NEXT: br label [[OMP_INNER_FOR_COND73]], !llvm.loop [[LOOP17:![0-9]+]] 27353 // CHECK31: omp.inner.for.end100: 27354 // CHECK31-NEXT: store i8 96, i8* [[IT72]], align 1 27355 // CHECK31-NEXT: [[TMP56:%.*]] = load i32, i32* [[A]], align 4 27356 // CHECK31-NEXT: [[TMP57:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 27357 // CHECK31-NEXT: call void @llvm.stackrestore(i8* [[TMP57]]) 27358 // CHECK31-NEXT: ret i32 [[TMP56]] 27359 // 27360 // 27361 // CHECK31-LABEL: define {{[^@]+}}@_Z3bari 27362 // CHECK31-SAME: (i32 [[N:%.*]]) #[[ATTR0]] { 27363 // CHECK31-NEXT: entry: 27364 // CHECK31-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 27365 // CHECK31-NEXT: [[A:%.*]] = alloca i32, align 4 27366 // CHECK31-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4 27367 // CHECK31-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 27368 // CHECK31-NEXT: store i32 0, i32* [[A]], align 4 27369 // CHECK31-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 27370 // CHECK31-NEXT: [[CALL:%.*]] = call i32 @_Z3fooi(i32 [[TMP0]]) 27371 // CHECK31-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 
4 27372 // CHECK31-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] 27373 // CHECK31-NEXT: store i32 [[ADD]], i32* [[A]], align 4 27374 // CHECK31-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 27375 // CHECK31-NEXT: [[CALL1:%.*]] = call i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 4 dereferenceable(8) [[S]], i32 [[TMP2]]) 27376 // CHECK31-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 27377 // CHECK31-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] 27378 // CHECK31-NEXT: store i32 [[ADD2]], i32* [[A]], align 4 27379 // CHECK31-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 27380 // CHECK31-NEXT: [[CALL3:%.*]] = call i32 @_ZL7fstatici(i32 [[TMP4]]) 27381 // CHECK31-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4 27382 // CHECK31-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] 27383 // CHECK31-NEXT: store i32 [[ADD4]], i32* [[A]], align 4 27384 // CHECK31-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4 27385 // CHECK31-NEXT: [[CALL5:%.*]] = call i32 @_Z9ftemplateIiET_i(i32 [[TMP6]]) 27386 // CHECK31-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4 27387 // CHECK31-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]] 27388 // CHECK31-NEXT: store i32 [[ADD6]], i32* [[A]], align 4 27389 // CHECK31-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4 27390 // CHECK31-NEXT: ret i32 [[TMP8]] 27391 // 27392 // 27393 // CHECK31-LABEL: define {{[^@]+}}@_ZN2S12r1Ei 27394 // CHECK31-SAME: (%struct.S1* nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 [[N:%.*]]) #[[ATTR0]] comdat align 2 { 27395 // CHECK31-NEXT: entry: 27396 // CHECK31-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 27397 // CHECK31-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 27398 // CHECK31-NEXT: [[B:%.*]] = alloca i32, align 4 27399 // CHECK31-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4 27400 // CHECK31-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4 27401 // CHECK31-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1 27402 // CHECK31-NEXT: [[TMP:%.*]] = alloca i64, align 4 27403 // CHECK31-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 27404 // CHECK31-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 27405 // CHECK31-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 27406 // CHECK31-NEXT: [[IT:%.*]] = alloca i64, align 8 27407 // CHECK31-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 27408 // CHECK31-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 27409 // CHECK31-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 27410 // CHECK31-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 27411 // CHECK31-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 27412 // CHECK31-NEXT: store i32 [[ADD]], i32* [[B]], align 4 27413 // CHECK31-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 27414 // CHECK31-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave() 27415 // CHECK31-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4 27416 // CHECK31-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]] 27417 // CHECK31-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2 27418 // CHECK31-NEXT: store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4 27419 // CHECK31-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 27420 // CHECK31-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 60 27421 // CHECK31-NEXT: [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8 27422 // CHECK31-NEXT: store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_]], align 1 27423 // CHECK31-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 27424 // CHECK31-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8 27425 // CHECK31-NEXT: 
[[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 27426 // CHECK31-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8 27427 // CHECK31-NEXT: [[TMP6:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1 27428 // CHECK31-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP6]] to i1 27429 // CHECK31-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 27430 // CHECK31: omp_if.then: 27431 // CHECK31-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 27432 // CHECK31: omp.inner.for.cond: 27433 // CHECK31-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19 27434 // CHECK31-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !19 27435 // CHECK31-NEXT: [[CMP2:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]] 27436 // CHECK31-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 27437 // CHECK31: omp.inner.for.body: 27438 // CHECK31-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19 27439 // CHECK31-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400 27440 // CHECK31-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] 27441 // CHECK31-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !19 27442 // CHECK31-NEXT: [[TMP10:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group !19 27443 // CHECK31-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP10]] to double 27444 // CHECK31-NEXT: [[ADD3:%.*]] = fadd double [[CONV]], 1.500000e+00 27445 // CHECK31-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0 27446 // CHECK31-NEXT: store double [[ADD3]], double* [[A]], align 4, !nontemporal !20, !llvm.access.group !19 27447 // CHECK31-NEXT: [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 27448 // CHECK31-NEXT: [[TMP11:%.*]] = load double, double* [[A4]], align 4, !nontemporal !20, !llvm.access.group !19 27449 // CHECK31-NEXT: [[INC:%.*]] = fadd double [[TMP11]], 1.000000e+00 27450 // CHECK31-NEXT: store double [[INC]], double* [[A4]], align 4, !nontemporal !20, !llvm.access.group !19 27451 // CHECK31-NEXT: [[CONV5:%.*]] = fptosi double [[INC]] to i16 27452 // CHECK31-NEXT: [[TMP12:%.*]] = mul nsw i32 1, [[TMP1]] 27453 // CHECK31-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP12]] 27454 // CHECK31-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1 27455 // CHECK31-NEXT: store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2, !llvm.access.group !19 27456 // CHECK31-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 27457 // CHECK31: omp.body.continue: 27458 // CHECK31-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 27459 // CHECK31: omp.inner.for.inc: 27460 // CHECK31-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19 27461 // CHECK31-NEXT: [[ADD7:%.*]] = add i64 [[TMP13]], 1 27462 // CHECK31-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19 27463 // CHECK31-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]] 27464 // CHECK31: omp.inner.for.end: 27465 // CHECK31-NEXT: br label [[OMP_IF_END:%.*]] 27466 // CHECK31: omp_if.else: 27467 // CHECK31-NEXT: br label [[OMP_INNER_FOR_COND8:%.*]] 27468 // CHECK31: omp.inner.for.cond8: 27469 // CHECK31-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 27470 // CHECK31-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 27471 // CHECK31-NEXT: [[CMP9:%.*]] = icmp ule i64 [[TMP14]], [[TMP15]] 27472 // CHECK31-NEXT: br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY10:%.*]], 
label [[OMP_INNER_FOR_END24:%.*]] 27473 // CHECK31: omp.inner.for.body10: 27474 // CHECK31-NEXT: [[TMP16:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 27475 // CHECK31-NEXT: [[MUL11:%.*]] = mul i64 [[TMP16]], 400 27476 // CHECK31-NEXT: [[SUB12:%.*]] = sub i64 2000, [[MUL11]] 27477 // CHECK31-NEXT: store i64 [[SUB12]], i64* [[IT]], align 8 27478 // CHECK31-NEXT: [[TMP17:%.*]] = load i32, i32* [[B]], align 4 27479 // CHECK31-NEXT: [[CONV13:%.*]] = sitofp i32 [[TMP17]] to double 27480 // CHECK31-NEXT: [[ADD14:%.*]] = fadd double [[CONV13]], 1.500000e+00 27481 // CHECK31-NEXT: [[A15:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 27482 // CHECK31-NEXT: store double [[ADD14]], double* [[A15]], align 4 27483 // CHECK31-NEXT: [[A16:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 27484 // CHECK31-NEXT: [[TMP18:%.*]] = load double, double* [[A16]], align 4 27485 // CHECK31-NEXT: [[INC17:%.*]] = fadd double [[TMP18]], 1.000000e+00 27486 // CHECK31-NEXT: store double [[INC17]], double* [[A16]], align 4 27487 // CHECK31-NEXT: [[CONV18:%.*]] = fptosi double [[INC17]] to i16 27488 // CHECK31-NEXT: [[TMP19:%.*]] = mul nsw i32 1, [[TMP1]] 27489 // CHECK31-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP19]] 27490 // CHECK31-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX19]], i32 1 27491 // CHECK31-NEXT: store i16 [[CONV18]], i16* [[ARRAYIDX20]], align 2 27492 // CHECK31-NEXT: br label [[OMP_BODY_CONTINUE21:%.*]] 27493 // CHECK31: omp.body.continue21: 27494 // CHECK31-NEXT: br label [[OMP_INNER_FOR_INC22:%.*]] 27495 // CHECK31: omp.inner.for.inc22: 27496 // CHECK31-NEXT: [[TMP20:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 27497 // CHECK31-NEXT: [[ADD23:%.*]] = add i64 [[TMP20]], 1 27498 // CHECK31-NEXT: store i64 [[ADD23]], i64* [[DOTOMP_IV]], align 8 27499 // CHECK31-NEXT: br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP23:![0-9]+]] 27500 // CHECK31: omp.inner.for.end24: 27501 // CHECK31-NEXT: br label [[OMP_IF_END]] 27502 // CHECK31: omp_if.end: 27503 // CHECK31-NEXT: store i64 400, i64* [[IT]], align 8 27504 // CHECK31-NEXT: [[TMP21:%.*]] = mul nsw i32 1, [[TMP1]] 27505 // CHECK31-NEXT: [[ARRAYIDX25:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP21]] 27506 // CHECK31-NEXT: [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX25]], i32 1 27507 // CHECK31-NEXT: [[TMP22:%.*]] = load i16, i16* [[ARRAYIDX26]], align 2 27508 // CHECK31-NEXT: [[CONV27:%.*]] = sext i16 [[TMP22]] to i32 27509 // CHECK31-NEXT: [[TMP23:%.*]] = load i32, i32* [[B]], align 4 27510 // CHECK31-NEXT: [[ADD28:%.*]] = add nsw i32 [[CONV27]], [[TMP23]] 27511 // CHECK31-NEXT: [[TMP24:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 27512 // CHECK31-NEXT: call void @llvm.stackrestore(i8* [[TMP24]]) 27513 // CHECK31-NEXT: ret i32 [[ADD28]] 27514 // 27515 // 27516 // CHECK31-LABEL: define {{[^@]+}}@_ZL7fstatici 27517 // CHECK31-SAME: (i32 [[N:%.*]]) #[[ATTR0]] { 27518 // CHECK31-NEXT: entry: 27519 // CHECK31-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 27520 // CHECK31-NEXT: [[A:%.*]] = alloca i32, align 4 27521 // CHECK31-NEXT: [[AA:%.*]] = alloca i16, align 2 27522 // CHECK31-NEXT: [[AAA:%.*]] = alloca i8, align 1 27523 // CHECK31-NEXT: [[B:%.*]] = alloca [10 x i32], align 4 27524 // CHECK31-NEXT: [[TMP:%.*]] = alloca i32, align 4 27525 // CHECK31-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 27526 // CHECK31-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 27527 // CHECK31-NEXT: store i32 [[N]], 
i32* [[N_ADDR]], align 4 27528 // CHECK31-NEXT: store i32 0, i32* [[A]], align 4 27529 // CHECK31-NEXT: store i16 0, i16* [[AA]], align 2 27530 // CHECK31-NEXT: store i8 0, i8* [[AAA]], align 1 27531 // CHECK31-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 27532 // CHECK31-NEXT: store i32 429496720, i32* [[DOTOMP_UB]], align 4 27533 // CHECK31-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4 27534 // CHECK31-NEXT: ret i32 [[TMP0]] 27535 // 27536 // 27537 // CHECK31-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i 27538 // CHECK31-SAME: (i32 [[N:%.*]]) #[[ATTR0]] comdat { 27539 // CHECK31-NEXT: entry: 27540 // CHECK31-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 27541 // CHECK31-NEXT: [[A:%.*]] = alloca i32, align 4 27542 // CHECK31-NEXT: [[AA:%.*]] = alloca i16, align 2 27543 // CHECK31-NEXT: [[B:%.*]] = alloca [10 x i32], align 4 27544 // CHECK31-NEXT: [[TMP:%.*]] = alloca i64, align 4 27545 // CHECK31-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 27546 // CHECK31-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 27547 // CHECK31-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 27548 // CHECK31-NEXT: [[I:%.*]] = alloca i64, align 8 27549 // CHECK31-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 27550 // CHECK31-NEXT: store i32 0, i32* [[A]], align 4 27551 // CHECK31-NEXT: store i16 0, i16* [[AA]], align 2 27552 // CHECK31-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 27553 // CHECK31-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8 27554 // CHECK31-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 27555 // CHECK31-NEXT: store i64 [[TMP0]], i64* [[DOTOMP_IV]], align 8 27556 // CHECK31-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 27557 // CHECK31: omp.inner.for.cond: 27558 // CHECK31-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !25 27559 // CHECK31-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !25 27560 // CHECK31-NEXT: [[CMP:%.*]] = icmp sle i64 [[TMP1]], [[TMP2]] 27561 // CHECK31-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 27562 // CHECK31: omp.inner.for.body: 27563 // CHECK31-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !25 27564 // CHECK31-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP3]], 3 27565 // CHECK31-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]] 27566 // CHECK31-NEXT: store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !25 27567 // CHECK31-NEXT: [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !25 27568 // CHECK31-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 27569 // CHECK31-NEXT: store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group !25 27570 // CHECK31-NEXT: [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !25 27571 // CHECK31-NEXT: [[CONV:%.*]] = sext i16 [[TMP5]] to i32 27572 // CHECK31-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1 27573 // CHECK31-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16 27574 // CHECK31-NEXT: store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group !25 27575 // CHECK31-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i32 0, i32 2 27576 // CHECK31-NEXT: [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !25 27577 // CHECK31-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1 27578 // CHECK31-NEXT: store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !25 27579 // CHECK31-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 27580 // CHECK31: omp.body.continue: 27581 // CHECK31-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 27582 // 
CHECK31: omp.inner.for.inc: 27583 // CHECK31-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !25 27584 // CHECK31-NEXT: [[ADD5:%.*]] = add nsw i64 [[TMP7]], 1 27585 // CHECK31-NEXT: store i64 [[ADD5]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !25 27586 // CHECK31-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]] 27587 // CHECK31: omp.inner.for.end: 27588 // CHECK31-NEXT: store i64 11, i64* [[I]], align 8 27589 // CHECK31-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4 27590 // CHECK31-NEXT: ret i32 [[TMP8]] 27591 // 27592 // 27593 // CHECK32-LABEL: define {{[^@]+}}@_Z7get_valv 27594 // CHECK32-SAME: () #[[ATTR0:[0-9]+]] { 27595 // CHECK32-NEXT: entry: 27596 // CHECK32-NEXT: ret i64 0 27597 // 27598 // 27599 // CHECK32-LABEL: define {{[^@]+}}@_Z3fooi 27600 // CHECK32-SAME: (i32 [[N:%.*]]) #[[ATTR0]] { 27601 // CHECK32-NEXT: entry: 27602 // CHECK32-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 27603 // CHECK32-NEXT: [[A:%.*]] = alloca i32, align 4 27604 // CHECK32-NEXT: [[AA:%.*]] = alloca i16, align 2 27605 // CHECK32-NEXT: [[B:%.*]] = alloca [10 x float], align 4 27606 // CHECK32-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4 27607 // CHECK32-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4 27608 // CHECK32-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8 27609 // CHECK32-NEXT: [[__VLA_EXPR1:%.*]] = alloca i32, align 4 27610 // CHECK32-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4 27611 // CHECK32-NEXT: [[TMP:%.*]] = alloca i32, align 4 27612 // CHECK32-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 27613 // CHECK32-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 27614 // CHECK32-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 27615 // CHECK32-NEXT: [[I:%.*]] = alloca i32, align 4 27616 // CHECK32-NEXT: [[K:%.*]] = alloca i64, align 8 27617 // CHECK32-NEXT: [[_TMP3:%.*]] = alloca i32, align 4 27618 // CHECK32-NEXT: [[DOTOMP_LB4:%.*]] = alloca i32, align 4 27619 // CHECK32-NEXT: [[DOTOMP_UB5:%.*]] = alloca i32, align 4 27620 // CHECK32-NEXT: [[DOTOMP_IV6:%.*]] = alloca i32, align 4 27621 // CHECK32-NEXT: [[DOTLINEAR_START:%.*]] = alloca i64, align 8 27622 // CHECK32-NEXT: [[I7:%.*]] = alloca i32, align 4 27623 // CHECK32-NEXT: [[K8:%.*]] = alloca i64, align 8 27624 // CHECK32-NEXT: [[LIN:%.*]] = alloca i32, align 4 27625 // CHECK32-NEXT: [[_TMP20:%.*]] = alloca i64, align 4 27626 // CHECK32-NEXT: [[DOTOMP_LB21:%.*]] = alloca i64, align 8 27627 // CHECK32-NEXT: [[DOTOMP_UB22:%.*]] = alloca i64, align 8 27628 // CHECK32-NEXT: [[DOTOMP_IV23:%.*]] = alloca i64, align 8 27629 // CHECK32-NEXT: [[DOTLINEAR_START24:%.*]] = alloca i32, align 4 27630 // CHECK32-NEXT: [[DOTLINEAR_START25:%.*]] = alloca i32, align 4 27631 // CHECK32-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8 27632 // CHECK32-NEXT: [[IT:%.*]] = alloca i64, align 8 27633 // CHECK32-NEXT: [[LIN27:%.*]] = alloca i32, align 4 27634 // CHECK32-NEXT: [[A28:%.*]] = alloca i32, align 4 27635 // CHECK32-NEXT: [[_TMP49:%.*]] = alloca i16, align 2 27636 // CHECK32-NEXT: [[DOTOMP_LB50:%.*]] = alloca i32, align 4 27637 // CHECK32-NEXT: [[DOTOMP_UB51:%.*]] = alloca i32, align 4 27638 // CHECK32-NEXT: [[DOTOMP_IV52:%.*]] = alloca i32, align 4 27639 // CHECK32-NEXT: [[IT53:%.*]] = alloca i16, align 2 27640 // CHECK32-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 27641 // CHECK32-NEXT: [[_TMP68:%.*]] = alloca i8, align 1 27642 // CHECK32-NEXT: [[DOTOMP_LB69:%.*]] = alloca i32, align 4 27643 // CHECK32-NEXT: [[DOTOMP_UB70:%.*]] = alloca i32, align 4 27644 // CHECK32-NEXT: 
[[DOTOMP_IV71:%.*]] = alloca i32, align 4 27645 // CHECK32-NEXT: [[IT72:%.*]] = alloca i8, align 1 27646 // CHECK32-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 27647 // CHECK32-NEXT: store i32 0, i32* [[A]], align 4 27648 // CHECK32-NEXT: store i16 0, i16* [[AA]], align 2 27649 // CHECK32-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 27650 // CHECK32-NEXT: [[TMP1:%.*]] = call i8* @llvm.stacksave() 27651 // CHECK32-NEXT: store i8* [[TMP1]], i8** [[SAVED_STACK]], align 4 27652 // CHECK32-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP0]], align 4 27653 // CHECK32-NEXT: store i32 [[TMP0]], i32* [[__VLA_EXPR0]], align 4 27654 // CHECK32-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 27655 // CHECK32-NEXT: [[TMP3:%.*]] = mul nuw i32 5, [[TMP2]] 27656 // CHECK32-NEXT: [[VLA1:%.*]] = alloca double, i32 [[TMP3]], align 8 27657 // CHECK32-NEXT: store i32 [[TMP2]], i32* [[__VLA_EXPR1]], align 4 27658 // CHECK32-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 27659 // CHECK32-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4 27660 // CHECK32-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 27661 // CHECK32-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 27662 // CHECK32-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 27663 // CHECK32: omp.inner.for.cond: 27664 // CHECK32-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3 27665 // CHECK32-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !3 27666 // CHECK32-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 27667 // CHECK32-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 27668 // CHECK32: omp.inner.for.body: 27669 // CHECK32-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3 27670 // CHECK32-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5 27671 // CHECK32-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]] 27672 // CHECK32-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !3 27673 // CHECK32-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 27674 // CHECK32: omp.body.continue: 27675 // CHECK32-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 27676 // CHECK32: omp.inner.for.inc: 27677 // CHECK32-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3 27678 // CHECK32-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 27679 // CHECK32-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3 27680 // CHECK32-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] 27681 // CHECK32: omp.inner.for.end: 27682 // CHECK32-NEXT: store i32 33, i32* [[I]], align 4 27683 // CHECK32-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv() 27684 // CHECK32-NEXT: store i64 [[CALL]], i64* [[K]], align 8 27685 // CHECK32-NEXT: store i32 0, i32* [[DOTOMP_LB4]], align 4 27686 // CHECK32-NEXT: store i32 8, i32* [[DOTOMP_UB5]], align 4 27687 // CHECK32-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB4]], align 4 27688 // CHECK32-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_IV6]], align 4 27689 // CHECK32-NEXT: [[TMP10:%.*]] = load i64, i64* [[K]], align 8 27690 // CHECK32-NEXT: store i64 [[TMP10]], i64* [[DOTLINEAR_START]], align 8 27691 // CHECK32-NEXT: br label [[OMP_INNER_FOR_COND9:%.*]] 27692 // CHECK32: omp.inner.for.cond9: 27693 // CHECK32-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7 27694 // CHECK32-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB5]], align 4, !llvm.access.group !7 27695 // CHECK32-NEXT: [[CMP10:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]] 
27696 // CHECK32-NEXT: br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END19:%.*]] 27697 // CHECK32: omp.inner.for.body11: 27698 // CHECK32-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7 27699 // CHECK32-NEXT: [[MUL12:%.*]] = mul nsw i32 [[TMP13]], 1 27700 // CHECK32-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[MUL12]] 27701 // CHECK32-NEXT: store i32 [[SUB]], i32* [[I7]], align 4, !llvm.access.group !7 27702 // CHECK32-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !7 27703 // CHECK32-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7 27704 // CHECK32-NEXT: [[MUL13:%.*]] = mul nsw i32 [[TMP15]], 3 27705 // CHECK32-NEXT: [[CONV:%.*]] = sext i32 [[MUL13]] to i64 27706 // CHECK32-NEXT: [[ADD14:%.*]] = add nsw i64 [[TMP14]], [[CONV]] 27707 // CHECK32-NEXT: store i64 [[ADD14]], i64* [[K8]], align 8, !llvm.access.group !7 27708 // CHECK32-NEXT: [[TMP16:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !7 27709 // CHECK32-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP16]], 1 27710 // CHECK32-NEXT: store i32 [[ADD15]], i32* [[A]], align 4, !llvm.access.group !7 27711 // CHECK32-NEXT: br label [[OMP_BODY_CONTINUE16:%.*]] 27712 // CHECK32: omp.body.continue16: 27713 // CHECK32-NEXT: br label [[OMP_INNER_FOR_INC17:%.*]] 27714 // CHECK32: omp.inner.for.inc17: 27715 // CHECK32-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7 27716 // CHECK32-NEXT: [[ADD18:%.*]] = add nsw i32 [[TMP17]], 1 27717 // CHECK32-NEXT: store i32 [[ADD18]], i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7 27718 // CHECK32-NEXT: br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP8:![0-9]+]] 27719 // CHECK32: omp.inner.for.end19: 27720 // CHECK32-NEXT: store i32 1, i32* [[I7]], align 4 27721 // CHECK32-NEXT: [[TMP18:%.*]] = load i64, i64* [[K8]], align 8 27722 // CHECK32-NEXT: store i64 [[TMP18]], i64* [[K]], align 8 27723 // CHECK32-NEXT: store i32 12, i32* [[LIN]], align 4 27724 // CHECK32-NEXT: store i64 0, i64* [[DOTOMP_LB21]], align 8 27725 // CHECK32-NEXT: store i64 3, i64* [[DOTOMP_UB22]], align 8 27726 // CHECK32-NEXT: [[TMP19:%.*]] = load i64, i64* [[DOTOMP_LB21]], align 8 27727 // CHECK32-NEXT: store i64 [[TMP19]], i64* [[DOTOMP_IV23]], align 8 27728 // CHECK32-NEXT: [[TMP20:%.*]] = load i32, i32* [[LIN]], align 4 27729 // CHECK32-NEXT: store i32 [[TMP20]], i32* [[DOTLINEAR_START24]], align 4 27730 // CHECK32-NEXT: [[TMP21:%.*]] = load i32, i32* [[A]], align 4 27731 // CHECK32-NEXT: store i32 [[TMP21]], i32* [[DOTLINEAR_START25]], align 4 27732 // CHECK32-NEXT: [[CALL26:%.*]] = call i64 @_Z7get_valv() 27733 // CHECK32-NEXT: store i64 [[CALL26]], i64* [[DOTLINEAR_STEP]], align 8 27734 // CHECK32-NEXT: br label [[OMP_INNER_FOR_COND29:%.*]] 27735 // CHECK32: omp.inner.for.cond29: 27736 // CHECK32-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10 27737 // CHECK32-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTOMP_UB22]], align 8, !llvm.access.group !10 27738 // CHECK32-NEXT: [[CMP30:%.*]] = icmp ule i64 [[TMP22]], [[TMP23]] 27739 // CHECK32-NEXT: br i1 [[CMP30]], label [[OMP_INNER_FOR_BODY31:%.*]], label [[OMP_INNER_FOR_END48:%.*]] 27740 // CHECK32: omp.inner.for.body31: 27741 // CHECK32-NEXT: [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10 27742 // CHECK32-NEXT: [[MUL32:%.*]] = mul i64 [[TMP24]], 400 27743 // CHECK32-NEXT: [[SUB33:%.*]] = sub i64 2000, [[MUL32]] 27744 // CHECK32-NEXT: store i64 
[[SUB33]], i64* [[IT]], align 8, !llvm.access.group !10 27745 // CHECK32-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTLINEAR_START24]], align 4, !llvm.access.group !10 27746 // CHECK32-NEXT: [[CONV34:%.*]] = sext i32 [[TMP25]] to i64 27747 // CHECK32-NEXT: [[TMP26:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10 27748 // CHECK32-NEXT: [[TMP27:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !10 27749 // CHECK32-NEXT: [[MUL35:%.*]] = mul i64 [[TMP26]], [[TMP27]] 27750 // CHECK32-NEXT: [[ADD36:%.*]] = add i64 [[CONV34]], [[MUL35]] 27751 // CHECK32-NEXT: [[CONV37:%.*]] = trunc i64 [[ADD36]] to i32 27752 // CHECK32-NEXT: store i32 [[CONV37]], i32* [[LIN27]], align 4, !llvm.access.group !10 27753 // CHECK32-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTLINEAR_START25]], align 4, !llvm.access.group !10 27754 // CHECK32-NEXT: [[CONV38:%.*]] = sext i32 [[TMP28]] to i64 27755 // CHECK32-NEXT: [[TMP29:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10 27756 // CHECK32-NEXT: [[TMP30:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !10 27757 // CHECK32-NEXT: [[MUL39:%.*]] = mul i64 [[TMP29]], [[TMP30]] 27758 // CHECK32-NEXT: [[ADD40:%.*]] = add i64 [[CONV38]], [[MUL39]] 27759 // CHECK32-NEXT: [[CONV41:%.*]] = trunc i64 [[ADD40]] to i32 27760 // CHECK32-NEXT: store i32 [[CONV41]], i32* [[A28]], align 4, !llvm.access.group !10 27761 // CHECK32-NEXT: [[TMP31:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !10 27762 // CHECK32-NEXT: [[CONV42:%.*]] = sext i16 [[TMP31]] to i32 27763 // CHECK32-NEXT: [[ADD43:%.*]] = add nsw i32 [[CONV42]], 1 27764 // CHECK32-NEXT: [[CONV44:%.*]] = trunc i32 [[ADD43]] to i16 27765 // CHECK32-NEXT: store i16 [[CONV44]], i16* [[AA]], align 2, !llvm.access.group !10 27766 // CHECK32-NEXT: br label [[OMP_BODY_CONTINUE45:%.*]] 27767 // CHECK32: omp.body.continue45: 27768 // CHECK32-NEXT: br label [[OMP_INNER_FOR_INC46:%.*]] 27769 // CHECK32: omp.inner.for.inc46: 27770 // CHECK32-NEXT: [[TMP32:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10 27771 // CHECK32-NEXT: [[ADD47:%.*]] = add i64 [[TMP32]], 1 27772 // CHECK32-NEXT: store i64 [[ADD47]], i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10 27773 // CHECK32-NEXT: br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP11:![0-9]+]] 27774 // CHECK32: omp.inner.for.end48: 27775 // CHECK32-NEXT: store i64 400, i64* [[IT]], align 8 27776 // CHECK32-NEXT: [[TMP33:%.*]] = load i32, i32* [[LIN27]], align 4 27777 // CHECK32-NEXT: store i32 [[TMP33]], i32* [[LIN]], align 4 27778 // CHECK32-NEXT: [[TMP34:%.*]] = load i32, i32* [[A28]], align 4 27779 // CHECK32-NEXT: store i32 [[TMP34]], i32* [[A]], align 4 27780 // CHECK32-NEXT: store i32 0, i32* [[DOTOMP_LB50]], align 4 27781 // CHECK32-NEXT: store i32 3, i32* [[DOTOMP_UB51]], align 4 27782 // CHECK32-NEXT: [[TMP35:%.*]] = load i32, i32* [[DOTOMP_LB50]], align 4 27783 // CHECK32-NEXT: store i32 [[TMP35]], i32* [[DOTOMP_IV52]], align 4 27784 // CHECK32-NEXT: br label [[OMP_INNER_FOR_COND54:%.*]] 27785 // CHECK32: omp.inner.for.cond54: 27786 // CHECK32-NEXT: [[TMP36:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13 27787 // CHECK32-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTOMP_UB51]], align 4, !llvm.access.group !13 27788 // CHECK32-NEXT: [[CMP55:%.*]] = icmp sle i32 [[TMP36]], [[TMP37]] 27789 // CHECK32-NEXT: br i1 [[CMP55]], label [[OMP_INNER_FOR_BODY56:%.*]], label [[OMP_INNER_FOR_END67:%.*]] 27790 // CHECK32: omp.inner.for.body56: 27791 // CHECK32-NEXT: 
// CHECK32-NEXT: store i32 0, i32* [[DOTOMP_LB50]], align 4
// CHECK32-NEXT: store i32 3, i32* [[DOTOMP_UB51]], align 4
// CHECK32-NEXT: [[TMP35:%.*]] = load i32, i32* [[DOTOMP_LB50]], align 4
// CHECK32-NEXT: store i32 [[TMP35]], i32* [[DOTOMP_IV52]], align 4
// CHECK32-NEXT: br label [[OMP_INNER_FOR_COND54:%.*]]
// CHECK32: omp.inner.for.cond54:
// CHECK32-NEXT: [[TMP36:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13
// CHECK32-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTOMP_UB51]], align 4, !llvm.access.group !13
// CHECK32-NEXT: [[CMP55:%.*]] = icmp sle i32 [[TMP36]], [[TMP37]]
// CHECK32-NEXT: br i1 [[CMP55]], label [[OMP_INNER_FOR_BODY56:%.*]], label [[OMP_INNER_FOR_END67:%.*]]
// CHECK32: omp.inner.for.body56:
// CHECK32-NEXT: [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13
// CHECK32-NEXT: [[MUL57:%.*]] = mul nsw i32 [[TMP38]], 4
// CHECK32-NEXT: [[ADD58:%.*]] = add nsw i32 6, [[MUL57]]
// CHECK32-NEXT: [[CONV59:%.*]] = trunc i32 [[ADD58]] to i16
// CHECK32-NEXT: store i16 [[CONV59]], i16* [[IT53]], align 2, !llvm.access.group !13
// CHECK32-NEXT: [[TMP39:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !13
// CHECK32-NEXT: [[ADD60:%.*]] = add nsw i32 [[TMP39]], 1
// CHECK32-NEXT: store i32 [[ADD60]], i32* [[A]], align 4, !llvm.access.group !13
// CHECK32-NEXT: [[TMP40:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !13
// CHECK32-NEXT: [[CONV61:%.*]] = sext i16 [[TMP40]] to i32
// CHECK32-NEXT: [[ADD62:%.*]] = add nsw i32 [[CONV61]], 1
// CHECK32-NEXT: [[CONV63:%.*]] = trunc i32 [[ADD62]] to i16
// CHECK32-NEXT: store i16 [[CONV63]], i16* [[AA]], align 2, !llvm.access.group !13
// CHECK32-NEXT: br label [[OMP_BODY_CONTINUE64:%.*]]
// CHECK32: omp.body.continue64:
// CHECK32-NEXT: br label [[OMP_INNER_FOR_INC65:%.*]]
// CHECK32: omp.inner.for.inc65:
// CHECK32-NEXT: [[TMP41:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13
// CHECK32-NEXT: [[ADD66:%.*]] = add nsw i32 [[TMP41]], 1
// CHECK32-NEXT: store i32 [[ADD66]], i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13
// CHECK32-NEXT: br label [[OMP_INNER_FOR_COND54]], !llvm.loop [[LOOP14:![0-9]+]]
// CHECK32: omp.inner.for.end67:
// CHECK32-NEXT: store i16 22, i16* [[IT53]], align 2
// CHECK32-NEXT: [[TMP42:%.*]] = load i32, i32* [[A]], align 4
// CHECK32-NEXT: store i32 [[TMP42]], i32* [[DOTCAPTURE_EXPR_]], align 4
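// Next come the checks for the char-typed loop of @_Z3fooi: .omp.ub = 25
// (26 iterations), it = 122 - iv ('z' down through 'a'), finalized to 96. The
// body bumps the scalars, the fixed and variable-length arrays, and both
// fields of the TT struct. A sketch inferred from the IR (names are
// assumptions, not the test's verbatim source):
//
//   // 26 iterations: it = 'z' (122) down to 'a' (97); the final store is 96.
//   #pragma omp target parallel for simd
//   for (unsigned char it = 'z'; it >= 'a'; it += -1) {
//     a += 1;
//     b[2] += 1.0;      // [10 x float]
//     bn[3] += 1.0;     // float VLA
//     c[1][2] += 1.0;   // [5 x [10 x double]]
//     cn[1][3] += 1.0;  // double VLA, row stride 1*n
//     d.X += 1;         // TT struct, i64 field
//     d.Y += 1;         // TT struct, i8 field
//   }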
// CHECK32-NEXT: store i32 0, i32* [[DOTOMP_LB69]], align 4
// CHECK32-NEXT: store i32 25, i32* [[DOTOMP_UB70]], align 4
// CHECK32-NEXT: [[TMP43:%.*]] = load i32, i32* [[DOTOMP_LB69]], align 4
// CHECK32-NEXT: store i32 [[TMP43]], i32* [[DOTOMP_IV71]], align 4
// CHECK32-NEXT: br label [[OMP_INNER_FOR_COND73:%.*]]
// CHECK32: omp.inner.for.cond73:
// CHECK32-NEXT: [[TMP44:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16
// CHECK32-NEXT: [[TMP45:%.*]] = load i32, i32* [[DOTOMP_UB70]], align 4, !llvm.access.group !16
// CHECK32-NEXT: [[CMP74:%.*]] = icmp sle i32 [[TMP44]], [[TMP45]]
// CHECK32-NEXT: br i1 [[CMP74]], label [[OMP_INNER_FOR_BODY75:%.*]], label [[OMP_INNER_FOR_END100:%.*]]
// CHECK32: omp.inner.for.body75:
// CHECK32-NEXT: [[TMP46:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16
// CHECK32-NEXT: [[MUL76:%.*]] = mul nsw i32 [[TMP46]], 1
// CHECK32-NEXT: [[SUB77:%.*]] = sub nsw i32 122, [[MUL76]]
// CHECK32-NEXT: [[CONV78:%.*]] = trunc i32 [[SUB77]] to i8
// CHECK32-NEXT: store i8 [[CONV78]], i8* [[IT72]], align 1, !llvm.access.group !16
// CHECK32-NEXT: [[TMP47:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !16
// CHECK32-NEXT: [[ADD79:%.*]] = add nsw i32 [[TMP47]], 1
// CHECK32-NEXT: store i32 [[ADD79]], i32* [[A]], align 4, !llvm.access.group !16
// CHECK32-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i32 0, i32 2
// CHECK32-NEXT: [[TMP48:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !16
// CHECK32-NEXT: [[CONV80:%.*]] = fpext float [[TMP48]] to double
// CHECK32-NEXT: [[ADD81:%.*]] = fadd double [[CONV80]], 1.000000e+00
// CHECK32-NEXT: [[CONV82:%.*]] = fptrunc double [[ADD81]] to float
// CHECK32-NEXT: store float [[CONV82]], float* [[ARRAYIDX]], align 4, !llvm.access.group !16
// CHECK32-NEXT: [[ARRAYIDX83:%.*]] = getelementptr inbounds float, float* [[VLA]], i32 3
// CHECK32-NEXT: [[TMP49:%.*]] = load float, float* [[ARRAYIDX83]], align 4, !llvm.access.group !16
// CHECK32-NEXT: [[CONV84:%.*]] = fpext float [[TMP49]] to double
// CHECK32-NEXT: [[ADD85:%.*]] = fadd double [[CONV84]], 1.000000e+00
// CHECK32-NEXT: [[CONV86:%.*]] = fptrunc double [[ADD85]] to float
// CHECK32-NEXT: store float [[CONV86]], float* [[ARRAYIDX83]], align 4, !llvm.access.group !16
// CHECK32-NEXT: [[ARRAYIDX87:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i32 0, i32 1
// CHECK32-NEXT: [[ARRAYIDX88:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX87]], i32 0, i32 2
// CHECK32-NEXT: [[TMP50:%.*]] = load double, double* [[ARRAYIDX88]], align 8, !llvm.access.group !16
// CHECK32-NEXT: [[ADD89:%.*]] = fadd double [[TMP50]], 1.000000e+00
// CHECK32-NEXT: store double [[ADD89]], double* [[ARRAYIDX88]], align 8, !llvm.access.group !16
// CHECK32-NEXT: [[TMP51:%.*]] = mul nsw i32 1, [[TMP2]]
// CHECK32-NEXT: [[ARRAYIDX90:%.*]] = getelementptr inbounds double, double* [[VLA1]], i32 [[TMP51]]
// CHECK32-NEXT: [[ARRAYIDX91:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX90]], i32 3
// CHECK32-NEXT: [[TMP52:%.*]] = load double, double* [[ARRAYIDX91]], align 8, !llvm.access.group !16
// CHECK32-NEXT: [[ADD92:%.*]] = fadd double [[TMP52]], 1.000000e+00
// CHECK32-NEXT: store double [[ADD92]], double* [[ARRAYIDX91]], align 8, !llvm.access.group !16
// CHECK32-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
// CHECK32-NEXT: [[TMP53:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group !16
// CHECK32-NEXT: [[ADD93:%.*]] = add nsw i64 [[TMP53]], 1
// CHECK32-NEXT: store i64 [[ADD93]], i64* [[X]], align 4, !llvm.access.group !16
// CHECK32-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
// CHECK32-NEXT: [[TMP54:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group !16
// CHECK32-NEXT: [[CONV94:%.*]] = sext i8 [[TMP54]] to i32
// CHECK32-NEXT: [[ADD95:%.*]] = add nsw i32 [[CONV94]], 1
// CHECK32-NEXT: [[CONV96:%.*]] = trunc i32 [[ADD95]] to i8
// CHECK32-NEXT: store i8 [[CONV96]], i8* [[Y]], align 4, !llvm.access.group !16
// CHECK32-NEXT: br label [[OMP_BODY_CONTINUE97:%.*]]
// CHECK32: omp.body.continue97:
// CHECK32-NEXT: br label [[OMP_INNER_FOR_INC98:%.*]]
// CHECK32: omp.inner.for.inc98:
// CHECK32-NEXT: [[TMP55:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16
// CHECK32-NEXT: [[ADD99:%.*]] = add nsw i32 [[TMP55]], 1
// CHECK32-NEXT: store i32 [[ADD99]], i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16
// CHECK32-NEXT: br label [[OMP_INNER_FOR_COND73]], !llvm.loop [[LOOP17:![0-9]+]]
// CHECK32: omp.inner.for.end100:
// CHECK32-NEXT: store i8 96, i8* [[IT72]], align 1
// CHECK32-NEXT: [[TMP56:%.*]] = load i32, i32* [[A]], align 4
// CHECK32-NEXT: [[TMP57:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK32-NEXT: call void @llvm.stackrestore(i8* [[TMP57]])
// CHECK32-NEXT: ret i32 [[TMP56]]
//
//
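// @_Z3bari below has no offloading logic of its own; it simply chains the
// four callees checked in this file and accumulates their results. A minimal
// sketch reconstructed from the call sequence (names are assumptions, not the
// test's verbatim source):
//
//   int bar(int n) {
//     int a = 0;
//     a += foo(n);            // @_Z3fooi
//     S1 S;
//     a += S.r1(n);           // @_ZN2S12r1Ei
//     a += fstatic(n);        // @_ZL7fstatici (internal linkage)
//     a += ftemplate<int>(n); // @_Z9ftemplateIiET_i
//     return a;
//   }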
// CHECK32-LABEL: define {{[^@]+}}@_Z3bari
// CHECK32-SAME: (i32 [[N:%.*]]) #[[ATTR0]] {
// CHECK32-NEXT: entry:
// CHECK32-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK32-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK32-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4
// CHECK32-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK32-NEXT: store i32 0, i32* [[A]], align 4
// CHECK32-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK32-NEXT: [[CALL:%.*]] = call i32 @_Z3fooi(i32 [[TMP0]])
// CHECK32-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK32-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
// CHECK32-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK32-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK32-NEXT: [[CALL1:%.*]] = call i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 4 dereferenceable(8) [[S]], i32 [[TMP2]])
// CHECK32-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK32-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
// CHECK32-NEXT: store i32 [[ADD2]], i32* [[A]], align 4
// CHECK32-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK32-NEXT: [[CALL3:%.*]] = call i32 @_ZL7fstatici(i32 [[TMP4]])
// CHECK32-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4
// CHECK32-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
// CHECK32-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK32-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK32-NEXT: [[CALL5:%.*]] = call i32 @_Z9ftemplateIiET_i(i32 [[TMP6]])
// CHECK32-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK32-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
// CHECK32-NEXT: store i32 [[ADD6]], i32* [[A]], align 4
// CHECK32-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK32-NEXT: ret i32 [[TMP8]]
//
//
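// @_ZN2S12r1Ei below is the interesting case: the n > 60 condition is
// captured as a bool, and the simd loop is emitted twice. The omp_if.then
// clone carries !llvm.access.group on its memory operations (plus
// !nontemporal on the double member), while the omp_if.else clone has
// neither. A sketch inferred from the IR; loop bounds, clause spelling, and
// names are assumptions, not the test's verbatim source:
//
//   int S1::r1(int n) {
//     int b = n + 1;
//     short c[2][n];  // the i16 VLA of size 2*n
//     #pragma omp target parallel for simd if(n > 60) nontemporal(a)
//     for (unsigned long long it = 2000; it >= 600; it -= 400) { // 4 iters
//       this->a = (double)b + 1.5;
//       c[1][1] = ++this->a;  // fptosi to i16, stored at vla[1*n + 1]
//     }
//     return c[1][1] + b;
//   }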
// CHECK32-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
// CHECK32-SAME: (%struct.S1* nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 [[N:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK32-NEXT: entry:
// CHECK32-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK32-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK32-NEXT: [[B:%.*]] = alloca i32, align 4
// CHECK32-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4
// CHECK32-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK32-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
// CHECK32-NEXT: [[TMP:%.*]] = alloca i64, align 4
// CHECK32-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK32-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK32-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK32-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK32-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK32-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK32-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK32-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK32-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK32-NEXT: store i32 [[ADD]], i32* [[B]], align 4
// CHECK32-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK32-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK32-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
// CHECK32-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]]
// CHECK32-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2
// CHECK32-NEXT: store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
// CHECK32-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK32-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 60
// CHECK32-NEXT: [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8
// CHECK32-NEXT: store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK32-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK32-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK32-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK32-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8
// CHECK32-NEXT: [[TMP6:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK32-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP6]] to i1
// CHECK32-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK32: omp_if.then:
// CHECK32-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK32: omp.inner.for.cond:
// CHECK32-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19
// CHECK32-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !19
// CHECK32-NEXT: [[CMP2:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]]
// CHECK32-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK32: omp.inner.for.body:
// CHECK32-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19
// CHECK32-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400
// CHECK32-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK32-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !19
// CHECK32-NEXT: [[TMP10:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group !19
// CHECK32-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP10]] to double
// CHECK32-NEXT: [[ADD3:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK32-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK32-NEXT: store double [[ADD3]], double* [[A]], align 4, !nontemporal !20, !llvm.access.group !19
// CHECK32-NEXT: [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK32-NEXT: [[TMP11:%.*]] = load double, double* [[A4]], align 4, !nontemporal !20, !llvm.access.group !19
// CHECK32-NEXT: [[INC:%.*]] = fadd double [[TMP11]], 1.000000e+00
// CHECK32-NEXT: store double [[INC]], double* [[A4]], align 4, !nontemporal !20, !llvm.access.group !19
// CHECK32-NEXT: [[CONV5:%.*]] = fptosi double [[INC]] to i16
// CHECK32-NEXT: [[TMP12:%.*]] = mul nsw i32 1, [[TMP1]]
// CHECK32-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP12]]
// CHECK32-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
// CHECK32-NEXT: store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2, !llvm.access.group !19
// CHECK32-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK32: omp.body.continue:
// CHECK32-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK32: omp.inner.for.inc:
// CHECK32-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19
// CHECK32-NEXT: [[ADD7:%.*]] = add i64 [[TMP13]], 1
// CHECK32-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19
// CHECK32-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]]
// CHECK32: omp.inner.for.end:
// CHECK32-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK32: omp_if.else:
// CHECK32-NEXT: br label [[OMP_INNER_FOR_COND8:%.*]]
// CHECK32: omp.inner.for.cond8:
// CHECK32-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK32-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK32-NEXT: [[CMP9:%.*]] = icmp ule i64 [[TMP14]], [[TMP15]]
// CHECK32-NEXT: br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY10:%.*]], label [[OMP_INNER_FOR_END24:%.*]]
// CHECK32: omp.inner.for.body10:
// CHECK32-NEXT: [[TMP16:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK32-NEXT: [[MUL11:%.*]] = mul i64 [[TMP16]], 400
// CHECK32-NEXT: [[SUB12:%.*]] = sub i64 2000, [[MUL11]]
// CHECK32-NEXT: store i64 [[SUB12]], i64* [[IT]], align 8
// CHECK32-NEXT: [[TMP17:%.*]] = load i32, i32* [[B]], align 4
// CHECK32-NEXT: [[CONV13:%.*]] = sitofp i32 [[TMP17]] to double
// CHECK32-NEXT: [[ADD14:%.*]] = fadd double [[CONV13]], 1.500000e+00
// CHECK32-NEXT: [[A15:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK32-NEXT: store double [[ADD14]], double* [[A15]], align 4
// CHECK32-NEXT: [[A16:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK32-NEXT: [[TMP18:%.*]] = load double, double* [[A16]], align 4
// CHECK32-NEXT: [[INC17:%.*]] = fadd double [[TMP18]], 1.000000e+00
// CHECK32-NEXT: store double [[INC17]], double* [[A16]], align 4
// CHECK32-NEXT: [[CONV18:%.*]] = fptosi double [[INC17]] to i16
// CHECK32-NEXT: [[TMP19:%.*]] = mul nsw i32 1, [[TMP1]]
// CHECK32-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP19]]
// CHECK32-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX19]], i32 1
// CHECK32-NEXT: store i16 [[CONV18]], i16* [[ARRAYIDX20]], align 2
// CHECK32-NEXT: br label [[OMP_BODY_CONTINUE21:%.*]]
// CHECK32: omp.body.continue21:
// CHECK32-NEXT: br label [[OMP_INNER_FOR_INC22:%.*]]
// CHECK32: omp.inner.for.inc22:
// CHECK32-NEXT: [[TMP20:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK32-NEXT: [[ADD23:%.*]] = add i64 [[TMP20]], 1
// CHECK32-NEXT: store i64 [[ADD23]], i64* [[DOTOMP_IV]], align 8
// CHECK32-NEXT: br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP23:![0-9]+]]
// CHECK32: omp.inner.for.end24:
// CHECK32-NEXT: br label [[OMP_IF_END]]
// CHECK32: omp_if.end:
// CHECK32-NEXT: store i64 400, i64* [[IT]], align 8
// CHECK32-NEXT: [[TMP21:%.*]] = mul nsw i32 1, [[TMP1]]
// CHECK32-NEXT: [[ARRAYIDX25:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP21]]
// CHECK32-NEXT: [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX25]], i32 1
// CHECK32-NEXT: [[TMP22:%.*]] = load i16, i16* [[ARRAYIDX26]], align 2
// CHECK32-NEXT: [[CONV27:%.*]] = sext i16 [[TMP22]] to i32
// CHECK32-NEXT: [[TMP23:%.*]] = load i32, i32* [[B]], align 4
// CHECK32-NEXT: [[ADD28:%.*]] = add nsw i32 [[CONV27]], [[TMP23]]
// CHECK32-NEXT: [[TMP24:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK32-NEXT: call void @llvm.stackrestore(i8* [[TMP24]])
// CHECK32-NEXT: ret i32 [[ADD28]]
//
//
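// For @_ZL7fstatici below only the loop bookkeeping survives: .omp.lb = 0 and
// .omp.ub = 429496720 are stored (a trip count computed with unsigned
// wrap-around semantics), no loop body is emitted, and the function returns
// 'a' directly. A sketch consistent with that IR (bounds and names are
// assumptions, not the test's verbatim source):
//
//   static int fstatic(int n) {
//     int a = 0;
//     short aa = 0;
//     char aaa = 0;
//     int b[10];
//     #pragma omp target parallel for simd
//     for (unsigned i = 100; i < 10; i += 10) { // zero-trip at runtime
//       a += 1;  // never executed; only the LB/UB stores appear in the IR
//     }
//     return a;
//   }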
// CHECK32-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK32-SAME: (i32 [[N:%.*]]) #[[ATTR0]] {
// CHECK32-NEXT: entry:
// CHECK32-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK32-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK32-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK32-NEXT: [[AAA:%.*]] = alloca i8, align 1
// CHECK32-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK32-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK32-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK32-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK32-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK32-NEXT: store i32 0, i32* [[A]], align 4
// CHECK32-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK32-NEXT: store i8 0, i8* [[AAA]], align 1
// CHECK32-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK32-NEXT: store i32 429496720, i32* [[DOTOMP_UB]], align 4
// CHECK32-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK32-NEXT: ret i32 [[TMP0]]
//
//
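// Finally, @_Z9ftemplateIiET_i below checks the int instantiation of the
// template: a 7-iteration i64 loop (.omp.ub = 6) with i = -10 + 3*iv,
// finalized to 11. A sketch inferred from the IR (names are assumptions, not
// the test's verbatim source):
//
//   template <typename tx>
//   tx ftemplate(int n) {
//     tx a = 0;
//     short aa = 0;
//     tx b[10];
//     #pragma omp target parallel for simd
//     for (long long i = -10; i < 10; i += 3) {
//       a += 1;
//       aa += 1;
//       b[2] += 1;
//     }
//     return a;
//   }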
// CHECK32-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK32-SAME: (i32 [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK32-NEXT: entry:
// CHECK32-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK32-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK32-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK32-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK32-NEXT: [[TMP:%.*]] = alloca i64, align 4
// CHECK32-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK32-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK32-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK32-NEXT: [[I:%.*]] = alloca i64, align 8
// CHECK32-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK32-NEXT: store i32 0, i32* [[A]], align 4
// CHECK32-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK32-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK32-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8
// CHECK32-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK32-NEXT: store i64 [[TMP0]], i64* [[DOTOMP_IV]], align 8
// CHECK32-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK32: omp.inner.for.cond:
// CHECK32-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !25
// CHECK32-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !25
// CHECK32-NEXT: [[CMP:%.*]] = icmp sle i64 [[TMP1]], [[TMP2]]
// CHECK32-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK32: omp.inner.for.body:
// CHECK32-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !25
// CHECK32-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP3]], 3
// CHECK32-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
// CHECK32-NEXT: store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !25
// CHECK32-NEXT: [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !25
// CHECK32-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK32-NEXT: store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group !25
// CHECK32-NEXT: [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !25
// CHECK32-NEXT: [[CONV:%.*]] = sext i16 [[TMP5]] to i32
// CHECK32-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
// CHECK32-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
// CHECK32-NEXT: store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group !25
// CHECK32-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i32 0, i32 2
// CHECK32-NEXT: [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !25
// CHECK32-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK32-NEXT: store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !25
// CHECK32-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK32: omp.body.continue:
// CHECK32-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK32: omp.inner.for.inc:
// CHECK32-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !25
// CHECK32-NEXT: [[ADD5:%.*]] = add nsw i64 [[TMP7]], 1
// CHECK32-NEXT: store i64 [[ADD5]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !25
// CHECK32-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
// CHECK32: omp.inner.for.end:
// CHECK32-NEXT: store i64 11, i64* [[I]], align 8
// CHECK32-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK32-NEXT: ret i32 [[TMP8]]
//
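// As a worked example of the bookkeeping above: for the ftemplate loop with
// lb = -10, condition i < 10, and step 3, the precomputed trip count is
// (10 - (-10) - 1) / 3 + 1 = 7, so .omp.ub holds 7 - 1 = 6, the condition
// block compares the IV "sle" 6, and the post-loop value stored to i is
// -10 + 7 * 3 = 11, exactly the constants checked in the block above.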